2026-04-15T13:28:21.458 INFO:root:teuthology version: 1.2.4.dev6+g1c580df7a
2026-04-15T13:28:21.464 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-04-15T13:28:21.491 INFO:teuthology.run:Config: archive_path: /archive/supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5353
branch: wip-sse-s3-on-v20.2.0
description: orch:cephadm:smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/rgw-ingress 3-final}
email: null
first_in_suite: false
flavor: default
job_id: '5353'
ktype: distro
last_in_suite: false
machine_type: vps
name: supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps
no_nested_subset: false
openstack:
- volumes:
    count: 4
    size: 10
os_type: ubuntu
os_version: '22.04'
overrides:
  admin_socket:
    branch: wip-sse-s3-on-v20.2.0
  ansible.cephlab:
    branch: main
    repo: https://github.com/kshtsk/ceph-cm-ansible.git
    skip_tags: nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
    vars:
      logical_volumes:
        lv_1:
          scratch_dev: true
          size: 25%VG
          vg: vg_nvme
        lv_2:
          scratch_dev: true
          size: 25%VG
          vg: vg_nvme
        lv_3:
          scratch_dev: true
          size: 25%VG
          vg: vg_nvme
        lv_4:
          scratch_dev: true
          size: 25%VG
          vg: vg_nvme
      timezone: UTC
      volume_groups:
        vg_nvme:
          pvs: /dev/vdb,/dev/vdc,/dev/vdd,/dev/vde
  ceph:
    conf:
      mgr:
        debug mgr: 20
        debug ms: 1
      mon:
        debug mon: 20
        debug ms: 1
        debug paxos: 20
      osd:
        debug ms: 1
        debug osd: 20
        osd mclock iops capacity threshold hdd: 49000
        osd shutdown pgref assert: true
    flavor: default
    log-ignorelist:
    - \(MDS_ALL_DOWN\)
    - \(MDS_UP_LESS_THAN_MAX\)
    - CEPHADM_DAEMON_PLACE_FAIL
    - CEPHADM_FAILED_DAEMON
    log-only-match:
    - CEPHADM_
    sha1: 187293b0588135c3607a12257332b6880af4eff9
  ceph-deploy:
    conf:
      client:
        log file: /var/log/ceph/ceph-$name.$pid.log
      mon: {}
  cephadm:
    cephadm_binary_url: https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm
    containers:
      image: harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5
  install:
    ceph:
      flavor: default
      sha1: 187293b0588135c3607a12257332b6880af4eff9
    extra_system_packages:
      deb:
      - python3-jmespath
      - python3-xmltodict
      - s3cmd
      rpm:
      - bzip2
      - perl-Test-Harness
      - python3-jmespath
      - python3-xmltodict
      - s3cmd
    repos:
    - name: ceph-source
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph-debug/rpm-20.2.0-19-g7ec4401a095/el9.clyso/SRPMS
    - name: ceph-noarch
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph-debug/rpm-20.2.0-19-g7ec4401a095/el9.clyso/noarch
    - name: ceph
      priority: 1
      url: https://s3.clyso.com/ces-packages/components/ceph-debug/rpm-20.2.0-19-g7ec4401a095/el9.clyso/x86_64
  s3tests:
    sha1: e0c4ff71baef6d5126a0201df5fe54196d89b296
  workunit:
    branch: tt-wip-sse-s3-on-v20.2.0
    sha1: d26583cfb673e959af010b749fed6e7dba141caf
owner: supriti
priority: 1000
repo: https://github.com/ceph/ceph.git
roles:
- - host.a
  - client.0
- - host.b
  - client.1
seed: 9500
sha1: 187293b0588135c3607a12257332b6880af4eff9
sleep_before_teardown: 0
suite: orch:cephadm:smoke-roleless
suite_branch: tt-wip-sse-s3-on-v20.2.0
suite_path: /home/teuthos/src/git.local_ceph_d26583cfb673e959af010b749fed6e7dba141caf/qa
suite_relpath: qa
suite_repo: http://git.local/ceph.git
suite_sha1: d26583cfb673e959af010b749fed6e7dba141caf
targets:
  vm06.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBDRRRCM/WCbWEPBUTPFfCVAAU/PE9aS773u8iXdOsYRdTPCAF/WnaY2Bwvb2HCn/pp+Wkfsfe76knmR1jwUy1Mc=
  vm09.local: ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMqfHMmH1pIEySGb3/L4LmvcT9Xz7UfMRKOBVwPxsz0vzAG6a9u3PW+gYWieN6xbwjmTGXoD1YH2zYO11+sFICs=
tasks:
- nvme_loop: null
- cephadm:
    roleless: true
- cephadm.shell:
    host.a:
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
- vip: null
- cephadm.shell:
    host.a:
    - ceph orch device ls --refresh
- cephadm.apply:
    specs:
    - placement:
        count: 4
        host_pattern: '*'
      service_id: foo
      service_type: rgw
      spec:
        rgw_frontend_port: 8000
    - placement:
        count: 2
      service_id: rgw.foo
      service_type: ingress
      spec:
        backend_service: rgw.foo
        frontend_port: 9000
        monitor_port: 9001
        virtual_ip: '{{VIP0}}/{{VIPPREFIXLEN}}'
- cephadm.wait_for_service:
    service: rgw.foo
- cephadm.wait_for_service:
    service: ingress.rgw.foo
- cephadm.shell:
    host.a:
    - |
      echo "Check while healthy..."
      curl http://{{VIP0}}:9000/

      # stop each rgw in turn
      echo "Check with each rgw stopped in turn..."
      for rgw in `ceph orch ps | grep ^rgw.foo. | awk '{print $1}'`; do
        ceph orch daemon stop $rgw
        timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep stopped; do echo 'Waiting for $rgw to stop'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done"
        timeout 300 bash -c "while ! curl http://{{VIP0}}:9000/ ; do echo 'Waiting for http://{{VIP0}}:9000/ to be available'; sleep 1 ; done"
        ceph orch daemon start $rgw
        timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep running; do echo 'Waiting for $rgw to start'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done"
      done

      # stop each haproxy in turn
      echo "Check with each haproxy down in turn..."
      for haproxy in `ceph orch ps | grep ^haproxy.rgw.foo. | awk '{print $1}'`; do
        ceph orch daemon stop $haproxy
        timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep stopped; do echo 'Waiting for $haproxy to stop'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done"
        timeout 300 bash -c "while ! curl http://{{VIP0}}:9000/ ; do echo 'Waiting for http://{{VIP0}}:9000/ to be available'; sleep 1 ; done"
        ceph orch daemon start $haproxy
        timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep running; do echo 'Waiting for $haproxy to start'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done"
      done

      timeout 300 bash -c "while ! curl http://{{VIP0}}:9000/ ; do echo 'Waiting for http://{{VIP0}}:9000/ to be available'; sleep 1 ; done"
- cephadm.shell:
    host.a:
    - stat -c '%u %g' /var/log/ceph | grep '167 167'
    - ceph orch status
    - ceph orch ps
    - ceph orch ls
    - ceph orch host ls
    - ceph orch device ls
    - ceph orch ls | grep '^osd.all-available-devices '
teuthology:
  fragments_dropped: []
  meta: {}
  postmerge: []
teuthology_branch: clyso-debian-13
teuthology_repo: https://github.com/kshtsk/teuthology
teuthology_sha1: 1c580df7a9c7c2aadc272da296344fd99f27c444
timestamp: 2026-04-15_10:39:10
tube: vps
user: supriti
verbose: false
worker_log: /home/teuthos/.teuthology/dispatcher/dispatcher.vps.3072398
2026-04-15T13:28:21.492 INFO:teuthology.run:suite_path is set to /home/teuthos/src/git.local_ceph_d26583cfb673e959af010b749fed6e7dba141caf/qa; will attempt to use it
2026-04-15T13:28:21.492 INFO:teuthology.run:Found tasks at /home/teuthos/src/git.local_ceph_d26583cfb673e959af010b749fed6e7dba141caf/qa/tasks
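The two cephadm.apply specs above are ordinary cephadm service specifications: four rgw daemons (service rgw.foo, frontend port 8000) fronted by a two-instance ingress (haproxy plus keepalived, service ingress.rgw.foo) holding a virtual IP on port 9000. A minimal sketch of applying the same specs by hand on an already bootstrapped cluster; the {{VIP0}}/{{VIPPREFIXLEN}} placeholders are substituted by the vip task at run time, so the address below is purely illustrative:

    # Write both specs to a file and hand it to the orchestrator;
    # 'ceph orch apply -i' accepts multiple YAML documents separated by '---'.
    cat > /tmp/rgw-ingress.yaml <<'EOF'
    service_type: rgw
    service_id: foo
    placement:
      count: 4
      host_pattern: '*'
    spec:
      rgw_frontend_port: 8000
    ---
    service_type: ingress
    service_id: rgw.foo
    placement:
      count: 2
    spec:
      backend_service: rgw.foo
      frontend_port: 9000
      monitor_port: 9001
      virtual_ip: 10.0.0.100/24   # illustrative; the test injects {{VIP0}}/{{VIPPREFIXLEN}}
    EOF
    ceph orch apply -i /tmp/rgw-ingress.yaml
    ceph orch ls ingress          # watch until the ingress daemons report running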
2026-04-15T13:28:21.492 INFO:teuthology.run_tasks:Running task internal.save_config...
2026-04-15T13:28:21.492 INFO:teuthology.task.internal:Saving configuration
2026-04-15T13:28:21.499 INFO:teuthology.run_tasks:Running task internal.check_lock...
2026-04-15T13:28:21.500 INFO:teuthology.task.internal.check_lock:Checking locks...
2026-04-15T13:28:21.508 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm06.local', 'description': '/archive/supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5353', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'ubuntu', 'os_version': '22.04', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-04-15 13:27:06.205682', 'locked_by': 'supriti', 'mac_address': '52:55:00:00:00:06', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBDRRRCM/WCbWEPBUTPFfCVAAU/PE9aS773u8iXdOsYRdTPCAF/WnaY2Bwvb2HCn/pp+Wkfsfe76knmR1jwUy1Mc='}
2026-04-15T13:28:21.515 DEBUG:teuthology.task.internal.check_lock:machine status is {'name': 'vm09.local', 'description': '/archive/supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5353', 'up': True, 'machine_type': 'vps', 'is_vm': True, 'vm_host': {'name': 'localhost', 'description': None, 'up': True, 'machine_type': 'libvirt', 'is_vm': False, 'vm_host': None, 'os_type': None, 'os_version': None, 'arch': None, 'locked': True, 'locked_since': None, 'locked_by': None, 'mac_address': None, 'ssh_pub_key': None}, 'os_type': 'ubuntu', 'os_version': '22.04', 'arch': 'x86_64', 'locked': True, 'locked_since': '2026-04-15 13:27:06.206146', 'locked_by': 'supriti', 'mac_address': '52:55:00:00:00:09', 'ssh_pub_key': 'ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMqfHMmH1pIEySGb3/L4LmvcT9Xz7UfMRKOBVwPxsz0vzAG6a9u3PW+gYWieN6xbwjmTGXoD1YH2zYO11+sFICs='}
2026-04-15T13:28:21.515 INFO:teuthology.run_tasks:Running task internal.add_remotes...
2026-04-15T13:28:21.516 INFO:teuthology.task.internal:roles: ubuntu@vm06.local - ['host.a', 'client.0']
2026-04-15T13:28:21.516 INFO:teuthology.task.internal:roles: ubuntu@vm09.local - ['host.b', 'client.1']
2026-04-15T13:28:21.516 INFO:teuthology.run_tasks:Running task console_log...
2026-04-15T13:28:21.524 DEBUG:teuthology.task.console_log:vm06 does not support IPMI; excluding
2026-04-15T13:28:21.532 DEBUG:teuthology.task.console_log:vm09 does not support IPMI; excluding
2026-04-15T13:28:21.532 DEBUG:teuthology.exit:Installing handler: Handler(exiter=, func=.kill_console_loggers at 0x7fe942e88820>, signals=[15])
2026-04-15T13:28:21.532 INFO:teuthology.run_tasks:Running task internal.connect...
2026-04-15T13:28:21.533 INFO:teuthology.task.internal:Opening connections...
2026-04-15T13:28:21.533 DEBUG:teuthology.task.internal:connecting to ubuntu@vm06.local
2026-04-15T13:28:21.534 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm06.local', 'username': 'ubuntu', 'timeout': 60}
2026-04-15T13:28:21.593 DEBUG:teuthology.task.internal:connecting to ubuntu@vm09.local
2026-04-15T13:28:21.593 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm09.local', 'username': 'ubuntu', 'timeout': 60}
2026-04-15T13:28:21.654 INFO:teuthology.run_tasks:Running task internal.push_inventory...
2026-04-15T13:28:21.655 DEBUG:teuthology.orchestra.run.vm06:> uname -m
2026-04-15T13:28:21.682 INFO:teuthology.orchestra.run.vm06.stdout:x86_64
2026-04-15T13:28:21.682 DEBUG:teuthology.orchestra.run.vm06:> cat /etc/os-release
2026-04-15T13:28:21.729 INFO:teuthology.orchestra.run.vm06.stdout:PRETTY_NAME="Ubuntu 22.04.5 LTS"
2026-04-15T13:28:21.729 INFO:teuthology.orchestra.run.vm06.stdout:NAME="Ubuntu"
2026-04-15T13:28:21.729 INFO:teuthology.orchestra.run.vm06.stdout:VERSION_ID="22.04"
2026-04-15T13:28:21.729 INFO:teuthology.orchestra.run.vm06.stdout:VERSION="22.04.5 LTS (Jammy Jellyfish)"
2026-04-15T13:28:21.729 INFO:teuthology.orchestra.run.vm06.stdout:VERSION_CODENAME=jammy
2026-04-15T13:28:21.729 INFO:teuthology.orchestra.run.vm06.stdout:ID=ubuntu
2026-04-15T13:28:21.729 INFO:teuthology.orchestra.run.vm06.stdout:ID_LIKE=debian
2026-04-15T13:28:21.729 INFO:teuthology.orchestra.run.vm06.stdout:HOME_URL="https://www.ubuntu.com/"
2026-04-15T13:28:21.729 INFO:teuthology.orchestra.run.vm06.stdout:SUPPORT_URL="https://help.ubuntu.com/"
2026-04-15T13:28:21.729 INFO:teuthology.orchestra.run.vm06.stdout:BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
2026-04-15T13:28:21.729 INFO:teuthology.orchestra.run.vm06.stdout:PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
2026-04-15T13:28:21.729 INFO:teuthology.orchestra.run.vm06.stdout:UBUNTU_CODENAME=jammy
2026-04-15T13:28:21.729 INFO:teuthology.lock.ops:Updating vm06.local on lock server
2026-04-15T13:28:21.736 DEBUG:teuthology.orchestra.run.vm09:> uname -m
2026-04-15T13:28:21.745 INFO:teuthology.orchestra.run.vm09.stdout:x86_64
2026-04-15T13:28:21.745 DEBUG:teuthology.orchestra.run.vm09:> cat /etc/os-release
2026-04-15T13:28:21.791 INFO:teuthology.orchestra.run.vm09.stdout:PRETTY_NAME="Ubuntu 22.04.5 LTS"
2026-04-15T13:28:21.791 INFO:teuthology.orchestra.run.vm09.stdout:NAME="Ubuntu"
2026-04-15T13:28:21.791 INFO:teuthology.orchestra.run.vm09.stdout:VERSION_ID="22.04"
2026-04-15T13:28:21.791 INFO:teuthology.orchestra.run.vm09.stdout:VERSION="22.04.5 LTS (Jammy Jellyfish)"
2026-04-15T13:28:21.791 INFO:teuthology.orchestra.run.vm09.stdout:VERSION_CODENAME=jammy
2026-04-15T13:28:21.791 INFO:teuthology.orchestra.run.vm09.stdout:ID=ubuntu
2026-04-15T13:28:21.791 INFO:teuthology.orchestra.run.vm09.stdout:ID_LIKE=debian
2026-04-15T13:28:21.791 INFO:teuthology.orchestra.run.vm09.stdout:HOME_URL="https://www.ubuntu.com/"
2026-04-15T13:28:21.791 INFO:teuthology.orchestra.run.vm09.stdout:SUPPORT_URL="https://help.ubuntu.com/"
2026-04-15T13:28:21.791 INFO:teuthology.orchestra.run.vm09.stdout:BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/"
2026-04-15T13:28:21.791 INFO:teuthology.orchestra.run.vm09.stdout:PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
2026-04-15T13:28:21.791 INFO:teuthology.orchestra.run.vm09.stdout:UBUNTU_CODENAME=jammy
2026-04-15T13:28:21.792 INFO:teuthology.lock.ops:Updating vm09.local on lock server
2026-04-15T13:28:21.797 INFO:teuthology.run_tasks:Running task internal.serialize_remote_roles...
2026-04-15T13:28:21.799 INFO:teuthology.run_tasks:Running task internal.check_conflict...
2026-04-15T13:28:21.800 INFO:teuthology.task.internal:Checking for old test directory...
2026-04-15T13:28:21.800 DEBUG:teuthology.orchestra.run.vm06:> test '!' -e /home/ubuntu/cephtest
2026-04-15T13:28:21.801 DEBUG:teuthology.orchestra.run.vm09:> test '!' -e /home/ubuntu/cephtest
2026-04-15T13:28:21.835 INFO:teuthology.run_tasks:Running task internal.check_ceph_data...
2026-04-15T13:28:21.836 INFO:teuthology.task.internal:Checking for non-empty /var/lib/ceph...
2026-04-15T13:28:21.836 DEBUG:teuthology.orchestra.run.vm06:> test -z $(ls -A /var/lib/ceph)
2026-04-15T13:28:21.846 DEBUG:teuthology.orchestra.run.vm09:> test -z $(ls -A /var/lib/ceph)
2026-04-15T13:28:21.849 INFO:teuthology.orchestra.run.vm06.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
2026-04-15T13:28:21.879 INFO:teuthology.orchestra.run.vm09.stderr:ls: cannot access '/var/lib/ceph': No such file or directory
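Both hosts print the ls error yet the check passes, which is the intended outcome here: a missing /var/lib/ceph is as good as an empty one. The command leans on a shell subtlety: the command substitution is unquoted, so an absent or empty directory expands to zero words and `test -z` with no operand exits 0, while any real contents make the test fail or error out. A hypothetical, explicitly quoted equivalent, not the teuthology source:

    # Exit 0 when /var/lib/ceph is absent or empty, non-zero when it holds data.
    ceph_data_empty() {
        [ -z "$(ls -A /var/lib/ceph 2>/dev/null)" ]
    }
    ceph_data_empty && echo 'no stale ceph data' || echo 'stale ceph data present'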
2026-04-15T13:28:21.879 INFO:teuthology.run_tasks:Running task internal.vm_setup...
2026-04-15T13:28:21.889 DEBUG:teuthology.orchestra.run.vm06:> test -e /ceph-qa-ready
2026-04-15T13:28:21.896 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-15T13:28:22.140 DEBUG:teuthology.orchestra.run.vm09:> test -e /ceph-qa-ready
2026-04-15T13:28:22.143 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-15T13:28:22.391 INFO:teuthology.run_tasks:Running task internal.base...
2026-04-15T13:28:22.393 INFO:teuthology.task.internal:Creating test directory...
2026-04-15T13:28:22.393 DEBUG:teuthology.orchestra.run.vm06:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-04-15T13:28:22.394 DEBUG:teuthology.orchestra.run.vm09:> mkdir -p -m0755 -- /home/ubuntu/cephtest
2026-04-15T13:28:22.397 INFO:teuthology.run_tasks:Running task internal.archive_upload...
2026-04-15T13:28:22.398 INFO:teuthology.run_tasks:Running task internal.archive...
2026-04-15T13:28:22.399 INFO:teuthology.task.internal:Creating archive directory...
2026-04-15T13:28:22.399 DEBUG:teuthology.orchestra.run.vm06:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-04-15T13:28:22.438 DEBUG:teuthology.orchestra.run.vm09:> install -d -m0755 -- /home/ubuntu/cephtest/archive
2026-04-15T13:28:22.444 INFO:teuthology.run_tasks:Running task internal.coredump...
2026-04-15T13:28:22.445 INFO:teuthology.task.internal:Enabling coredump saving...
2026-04-15T13:28:22.445 DEBUG:teuthology.orchestra.run.vm06:> test -f /run/.containerenv -o -f /.dockerenv
2026-04-15T13:28:22.484 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-15T13:28:22.484 DEBUG:teuthology.orchestra.run.vm09:> test -f /run/.containerenv -o -f /.dockerenv
2026-04-15T13:28:22.486 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-15T13:28:22.486 DEBUG:teuthology.orchestra.run.vm06:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-04-15T13:28:22.526 DEBUG:teuthology.orchestra.run.vm09:> install -d -m0755 -- /home/ubuntu/cephtest/archive/coredump && sudo sysctl -w kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core && echo kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core | sudo tee -a /etc/sysctl.conf
2026-04-15T13:28:22.535 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-04-15T13:28:22.535 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern = /home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-04-15T13:28:22.539 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
2026-04-15T13:28:22.540 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern=/home/ubuntu/cephtest/archive/coredump/%t.%p.core
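internal.coredump points kernel.core_pattern at the job's archive directory, so any process that crashes on either VM leaves a core file the archive task can collect at teardown (%t.%p.core expands to epoch-timestamp.pid.core). A quick sketch for verifying the setting took effect, using a throwaway process as the victim; it assumes nothing beyond the commands above:

    sysctl kernel.core_pattern                  # should print the cephtest coredump path
    ulimit -c unlimited                         # allow core files in this shell
    sleep 60 &                                  # throwaway victim process
    kill -SEGV $!                               # force a segfault
    sleep 1
    ls /home/ubuntu/cephtest/archive/coredump   # one <epoch>.<pid>.core per crash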
2026-04-15T13:28:22.541 INFO:teuthology.run_tasks:Running task internal.sudo...
2026-04-15T13:28:22.543 INFO:teuthology.task.internal:Configuring sudo...
2026-04-15T13:28:22.543 DEBUG:teuthology.orchestra.run.vm06:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-04-15T13:28:22.586 DEBUG:teuthology.orchestra.run.vm09:> sudo sed -i.orig.teuthology -e 's/^\([^#]*\) \(requiretty\)/\1 !\2/g' -e 's/^\([^#]*\) !\(visiblepw\)/\1 \2/g' /etc/sudoers
2026-04-15T13:28:22.593 INFO:teuthology.run_tasks:Running task internal.syslog...
2026-04-15T13:28:22.596 INFO:teuthology.task.internal.syslog:Starting syslog monitoring...
2026-04-15T13:28:22.596 DEBUG:teuthology.orchestra.run.vm06:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-04-15T13:28:22.638 DEBUG:teuthology.orchestra.run.vm09:> mkdir -p -m0755 -- /home/ubuntu/cephtest/archive/syslog
2026-04-15T13:28:22.641 DEBUG:teuthology.orchestra.run.vm06:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-04-15T13:28:22.684 DEBUG:teuthology.orchestra.run.vm06:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-04-15T13:28:22.728 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-04-15T13:28:22.728 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-04-15T13:28:22.776 DEBUG:teuthology.orchestra.run.vm09:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/kern.log
2026-04-15T13:28:22.780 DEBUG:teuthology.orchestra.run.vm09:> install -m 666 /dev/null /home/ubuntu/cephtest/archive/syslog/misc.log
2026-04-15T13:28:22.829 DEBUG:teuthology.orchestra.run.vm09:> set -ex
2026-04-15T13:28:22.829 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/rsyslog.d/80-cephtest.conf
2026-04-15T13:28:22.878 DEBUG:teuthology.orchestra.run.vm06:> sudo service rsyslog restart
2026-04-15T13:28:22.879 DEBUG:teuthology.orchestra.run.vm09:> sudo service rsyslog restart
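The rsyslog drop-in is written via `dd` from stdin, so its contents never appear in this log. An assumed reconstruction, consistent with the kern.log and misc.log files created above but not copied from teuthology, would route kernel messages to one file and everything else to the other:

    # Assumption: plausible 80-cephtest.conf contents; the real selectors are not shown in this log.
    sudo tee /etc/rsyslog.d/80-cephtest.conf <<'EOF'
    kern.* -/home/ubuntu/cephtest/archive/syslog/kern.log;RSYSLOG_FileFormat
    *.*;kern.none -/home/ubuntu/cephtest/archive/syslog/misc.log;RSYSLOG_FileFormat
    EOF
    sudo service rsyslog restart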
2026-04-15T13:28:22.935 INFO:teuthology.run_tasks:Running task internal.timer...
2026-04-15T13:28:22.937 INFO:teuthology.task.internal:Starting timer...
2026-04-15T13:28:22.937 INFO:teuthology.run_tasks:Running task pcp...
2026-04-15T13:28:22.941 INFO:teuthology.run_tasks:Running task selinux...
2026-04-15T13:28:22.943 INFO:teuthology.task.selinux:Excluding vm06: VMs are not yet supported
2026-04-15T13:28:22.943 INFO:teuthology.task.selinux:Excluding vm09: VMs are not yet supported
2026-04-15T13:28:22.943 DEBUG:teuthology.task.selinux:Getting current SELinux state
2026-04-15T13:28:22.943 DEBUG:teuthology.task.selinux:Existing SELinux modes: {}
2026-04-15T13:28:22.944 INFO:teuthology.task.selinux:Putting SELinux into permissive mode
2026-04-15T13:28:22.944 INFO:teuthology.run_tasks:Running task ansible.cephlab...
2026-04-15T13:28:22.945 DEBUG:teuthology.task:Applying overrides for task ansible.cephlab: {'branch': 'main', 'repo': 'https://github.com/kshtsk/ceph-cm-ansible.git', 'skip_tags': 'nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs', 'vars': {'logical_volumes': {'lv_1': {'scratch_dev': True, 'size': '25%VG', 'vg': 'vg_nvme'}, 'lv_2': {'scratch_dev': True, 'size': '25%VG', 'vg': 'vg_nvme'}, 'lv_3': {'scratch_dev': True, 'size': '25%VG', 'vg': 'vg_nvme'}, 'lv_4': {'scratch_dev': True, 'size': '25%VG', 'vg': 'vg_nvme'}}, 'timezone': 'UTC', 'volume_groups': {'vg_nvme': {'pvs': '/dev/vdb,/dev/vdc,/dev/vdd,/dev/vde'}}}}
2026-04-15T13:28:22.945 DEBUG:teuthology.repo_utils:Setting repo remote to https://github.com/kshtsk/ceph-cm-ansible.git
2026-04-15T13:28:22.947 INFO:teuthology.repo_utils:Fetching github.com_kshtsk_ceph-cm-ansible_main from origin
2026-04-15T13:28:23.711 DEBUG:teuthology.repo_utils:Resetting repo at /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main to origin/main
2026-04-15T13:28:23.716 INFO:teuthology.task.ansible:Playbook: [{'import_playbook': 'ansible_managed.yml'}, {'import_playbook': 'teuthology.yml'}, {'hosts': 'testnodes', 'tasks': [{'set_fact': {'ran_from_cephlab_playbook': True}}]}, {'import_playbook': 'testnodes.yml'}, {'import_playbook': 'container-host.yml'}, {'import_playbook': 'cobbler.yml'}, {'import_playbook': 'paddles.yml'}, {'import_playbook': 'pulpito.yml'}, {'hosts': 'testnodes', 'become': True, 'tasks': [{'name': 'Touch /ceph-qa-ready', 'file': {'path': '/ceph-qa-ready', 'state': 'touch'}, 'when': 'ran_from_cephlab_playbook|bool'}]}]
2026-04-15T13:28:23.716 DEBUG:teuthology.task.ansible:Running ansible-playbook -v --extra-vars '{"ansible_ssh_user": "ubuntu", "logical_volumes": {"lv_1": {"scratch_dev": true, "size": "25%VG", "vg": "vg_nvme"}, "lv_2": {"scratch_dev": true, "size": "25%VG", "vg": "vg_nvme"}, "lv_3": {"scratch_dev": true, "size": "25%VG", "vg": "vg_nvme"}, "lv_4": {"scratch_dev": true, "size": "25%VG", "vg": "vg_nvme"}}, "timezone": "UTC", "volume_groups": {"vg_nvme": {"pvs": "/dev/vdb,/dev/vdc,/dev/vdd,/dev/vde"}}}' -i /tmp/teuth_ansible_inventory5lk0prj0 --limit vm06.local,vm09.local /home/teuthos/src/github.com_kshtsk_ceph-cm-ansible_main/cephlab.yml --skip-tags nagios,monitoring-scripts,hostname,pubkeys,zap,sudoers,kerberos,ntp-client,resolvconf,cpan,nfs
2026-04-15T13:30:55.518 DEBUG:teuthology.task.ansible:Reconnecting to [Remote(name='ubuntu@vm06.local'), Remote(name='ubuntu@vm09.local')]
2026-04-15T13:30:55.519 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm06.local'
2026-04-15T13:30:55.520 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm06.local', 'username': 'ubuntu', 'timeout': 60}
2026-04-15T13:30:55.584 DEBUG:teuthology.orchestra.run.vm06:> true
2026-04-15T13:30:55.829 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm06.local'
2026-04-15T13:30:55.829 INFO:teuthology.orchestra.remote:Trying to reconnect to host 'ubuntu@vm09.local'
2026-04-15T13:30:55.830 DEBUG:teuthology.orchestra.connection:{'hostname': 'vm09.local', 'username': 'ubuntu', 'timeout': 60}
2026-04-15T13:30:55.889 DEBUG:teuthology.orchestra.run.vm09:> true
2026-04-15T13:30:56.141 INFO:teuthology.orchestra.remote:Successfully reconnected to host 'ubuntu@vm09.local'
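The logical_volumes and volume_groups vars handed to the cephlab playbook carve the four data disks (/dev/vdb through /dev/vde) into one volume group and four equal scratch LVs; each LV is sized at 25% of the whole VG, which matches the four 20G LVs visible in the lsblk output later in the run. Expressed as plain LVM commands instead of ansible vars, the provisioning amounts to this sketch:

    sudo pvcreate /dev/vdb /dev/vdc /dev/vdd /dev/vde         # the vars' pvs list
    sudo vgcreate vg_nvme /dev/vdb /dev/vdc /dev/vdd /dev/vde
    for i in 1 2 3 4; do
        sudo lvcreate -l 25%VG -n lv_$i vg_nvme               # four equal scratch LVs
    done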
2026-04-15T13:30:56.141 INFO:teuthology.run_tasks:Running task clock...
2026-04-15T13:30:56.143 INFO:teuthology.task.clock:Syncing clocks and checking initial clock skew...
2026-04-15T13:30:56.143 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-04-15T13:30:56.143 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-04-15T13:30:56.145 INFO:teuthology.orchestra.run:Running command with timeout 360
2026-04-15T13:30:56.145 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service ; sudo ntpd -gq || sudo chronyc makestep ; sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service ; PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true
2026-04-15T13:30:56.161 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:56 ntpd[16280]: ntpd 4.2.8p15@1.3728-o Wed Feb 16 17:13:02 UTC 2022 (1): Starting
2026-04-15T13:30:56.161 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:56 ntpd[16280]: Command line: ntpd -gq
2026-04-15T13:30:56.161 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:56 ntpd[16280]: ----------------------------------------------------
2026-04-15T13:30:56.161 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:56 ntpd[16280]: ntp-4 is maintained by Network Time Foundation,
2026-04-15T13:30:56.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:56 ntpd[16280]: Inc. (NTF), a non-profit 501(c)(3) public-benefit
2026-04-15T13:30:56.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:56 ntpd[16280]: corporation.  Support and training for ntp-4 are
2026-04-15T13:30:56.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:56 ntpd[16280]: available at https://www.nwtime.org/support
2026-04-15T13:30:56.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:56 ntpd[16280]: ----------------------------------------------------
2026-04-15T13:30:56.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:56 ntpd[16280]: proto: precision = 0.030 usec (-25)
2026-04-15T13:30:56.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:56 ntpd[16280]: basedate set to 2022-02-04
2026-04-15T13:30:56.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:56 ntpd[16280]: gps base set to 2022-02-06 (week 2196)
2026-04-15T13:30:56.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:56 ntpd[16280]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): good hash signature
2026-04-15T13:30:56.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:56 ntpd[16280]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): loaded, expire=2025-12-28T00:00:00Z last=2017-01-01T00:00:00Z ofs=37
2026-04-15T13:30:56.162 INFO:teuthology.orchestra.run.vm06.stderr:15 Apr 13:30:56 ntpd[16280]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): expired 109 days ago
2026-04-15T13:30:56.163 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:56 ntpd[16280]: Listen and drop on 0 v6wildcard [::]:123
2026-04-15T13:30:56.163 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:56 ntpd[16280]: Listen and drop on 1 v4wildcard 0.0.0.0:123
2026-04-15T13:30:56.163 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:56 ntpd[16280]: Listen normally on 2 lo 127.0.0.1:123
2026-04-15T13:30:56.163 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:56 ntpd[16280]: Listen normally on 3 ens3 192.168.123.106:123
2026-04-15T13:30:56.163 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:56 ntpd[16280]: Listen normally on 4 lo [::1]:123
2026-04-15T13:30:56.163 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:56 ntpd[16280]: Listen normally on 5 ens3 [fe80::5055:ff:fe00:6%2]:123
2026-04-15T13:30:56.163 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:56 ntpd[16280]: Listening on routing socket on fd #22 for interface updates
2026-04-15T13:30:56.199 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:56 ntpd[16271]: ntpd 4.2.8p15@1.3728-o Wed Feb 16 17:13:02 UTC 2022 (1): Starting
2026-04-15T13:30:56.199 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:56 ntpd[16271]: Command line: ntpd -gq
2026-04-15T13:30:56.199 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:56 ntpd[16271]: ----------------------------------------------------
2026-04-15T13:30:56.199 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:56 ntpd[16271]: ntp-4 is maintained by Network Time Foundation,
2026-04-15T13:30:56.200 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:56 ntpd[16271]: Inc. (NTF), a non-profit 501(c)(3) public-benefit
2026-04-15T13:30:56.200 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:56 ntpd[16271]: corporation.  Support and training for ntp-4 are
2026-04-15T13:30:56.200 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:56 ntpd[16271]: available at https://www.nwtime.org/support
2026-04-15T13:30:56.200 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:56 ntpd[16271]: ----------------------------------------------------
2026-04-15T13:30:56.200 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:56 ntpd[16271]: proto: precision = 0.029 usec (-25)
2026-04-15T13:30:56.200 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:56 ntpd[16271]: basedate set to 2022-02-04
2026-04-15T13:30:56.200 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:56 ntpd[16271]: gps base set to 2022-02-06 (week 2196)
2026-04-15T13:30:56.200 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:56 ntpd[16271]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): good hash signature
2026-04-15T13:30:56.200 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:56 ntpd[16271]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): loaded, expire=2025-12-28T00:00:00Z last=2017-01-01T00:00:00Z ofs=37
2026-04-15T13:30:56.200 INFO:teuthology.orchestra.run.vm09.stderr:15 Apr 13:30:56 ntpd[16271]: leapsecond file ('/usr/share/zoneinfo/leap-seconds.list'): expired 109 days ago
2026-04-15T13:30:56.200 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:56 ntpd[16271]: Listen and drop on 0 v6wildcard [::]:123
2026-04-15T13:30:56.200 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:56 ntpd[16271]: Listen and drop on 1 v4wildcard 0.0.0.0:123
2026-04-15T13:30:56.200 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:56 ntpd[16271]: Listen normally on 2 lo 127.0.0.1:123
2026-04-15T13:30:56.200 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:56 ntpd[16271]: Listen normally on 3 ens3 192.168.123.109:123
2026-04-15T13:30:56.200 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:56 ntpd[16271]: Listen normally on 4 lo [::1]:123
2026-04-15T13:30:56.200 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:56 ntpd[16271]: Listen normally on 5 ens3 [fe80::5055:ff:fe00:9%2]:123
2026-04-15T13:30:56.200 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:56 ntpd[16271]: Listening on routing socket on fd #22 for interface updates
2026-04-15T13:30:57.163 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:57 ntpd[16280]: Soliciting pool server 178.215.228.24
2026-04-15T13:30:57.200 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:57 ntpd[16271]: Soliciting pool server 178.215.228.24
2026-04-15T13:30:58.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:58 ntpd[16280]: Soliciting pool server 46.38.241.235
2026-04-15T13:30:58.198 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:58 ntpd[16271]: Soliciting pool server 46.38.241.235
2026-04-15T13:30:58.304 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:58 ntpd[16280]: Soliciting pool server 178.63.9.212
2026-04-15T13:30:58.304 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:58 ntpd[16271]: Soliciting pool server 178.63.9.212
2026-04-15T13:30:59.161 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:59 ntpd[16280]: Soliciting pool server 212.132.108.186
2026-04-15T13:30:59.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:30:59 ntpd[16280]: Soliciting pool server 144.76.66.157
2026-04-15T13:30:59.198 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:59 ntpd[16271]: Soliciting pool server 94.130.184.193
2026-04-15T13:30:59.198 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:59 ntpd[16271]: Soliciting pool server 212.132.108.186
2026-04-15T13:30:59.198 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:30:59 ntpd[16271]: Soliciting pool server 144.76.66.157
2026-04-15T13:31:00.161 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:31:00 ntpd[16280]: Soliciting pool server 49.12.35.6
2026-04-15T13:31:00.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:31:00 ntpd[16280]: Soliciting pool server 49.12.199.148
2026-04-15T13:31:00.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:31:00 ntpd[16280]: Soliciting pool server 130.162.222.153
2026-04-15T13:31:00.197 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:31:00 ntpd[16271]: Soliciting pool server 49.12.35.6
2026-04-15T13:31:00.197 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:31:00 ntpd[16271]: Soliciting pool server 213.160.74.205
2026-04-15T13:31:00.197 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:31:00 ntpd[16271]: Soliciting pool server 49.12.199.148
2026-04-15T13:31:00.198 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:31:00 ntpd[16271]: Soliciting pool server 130.162.222.153
2026-04-15T13:31:01.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:31:01 ntpd[16280]: Soliciting pool server 193.138.81.81
2026-04-15T13:31:01.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:31:01 ntpd[16280]: Soliciting pool server 5.75.181.179
2026-04-15T13:31:01.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:31:01 ntpd[16280]: Soliciting pool server 185.125.190.56
2026-04-15T13:31:01.197 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:31:01 ntpd[16271]: Soliciting pool server 193.138.81.81
2026-04-15T13:31:01.197 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:31:01 ntpd[16271]: Soliciting pool server 5.75.181.179
2026-04-15T13:31:01.197 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:31:01 ntpd[16271]: Soliciting pool server 37.221.199.157
2026-04-15T13:31:01.197 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:31:01 ntpd[16271]: Soliciting pool server 185.125.190.56
2026-04-15T13:31:02.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:31:02 ntpd[16280]: Soliciting pool server 185.125.190.57
2026-04-15T13:31:02.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:31:02 ntpd[16280]: Soliciting pool server 162.159.200.1
2026-04-15T13:31:02.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:31:02 ntpd[16280]: Soliciting pool server 188.34.198.106
2026-04-15T13:31:02.196 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:31:02 ntpd[16271]: Soliciting pool server 185.125.190.57
2026-04-15T13:31:02.197 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:31:02 ntpd[16271]: Soliciting pool server 162.159.200.1
2026-04-15T13:31:02.197 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:31:02 ntpd[16271]: Soliciting pool server 188.34.198.106
2026-04-15T13:31:03.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:31:03 ntpd[16280]: Soliciting pool server 185.125.190.58
2026-04-15T13:31:03.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:31:03 ntpd[16280]: Soliciting pool server 91.202.42.82
2026-04-15T13:31:03.162 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:31:03 ntpd[16280]: Soliciting pool server 2003:a:87f:c37c::6
2026-04-15T13:31:04.161 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:31:04 ntpd[16280]: Soliciting pool server 91.189.91.157
2026-04-15T13:31:04.217 INFO:teuthology.orchestra.run.vm09.stdout:15 Apr 13:31:04 ntpd[16271]: ntpd: time slew +0.017139 s
2026-04-15T13:31:04.217 INFO:teuthology.orchestra.run.vm09.stdout:ntpd: time slew +0.017139s
2026-04-15T13:31:04.238 INFO:teuthology.orchestra.run.vm09.stdout:     remote           refid      st t when poll reach   delay   offset  jitter
2026-04-15T13:31:04.238 INFO:teuthology.orchestra.run.vm09.stdout:==============================================================================
2026-04-15T13:31:04.238 INFO:teuthology.orchestra.run.vm09.stdout: 0.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-04-15T13:31:04.238 INFO:teuthology.orchestra.run.vm09.stdout: 1.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-04-15T13:31:04.238 INFO:teuthology.orchestra.run.vm09.stdout: 2.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-04-15T13:31:04.238 INFO:teuthology.orchestra.run.vm09.stdout: 3.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-04-15T13:31:04.238 INFO:teuthology.orchestra.run.vm09.stdout: ntp.ubuntu.com  .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-04-15T13:31:06.184 INFO:teuthology.orchestra.run.vm06.stdout:15 Apr 13:31:06 ntpd[16280]: ntpd: time slew +0.000777 s
2026-04-15T13:31:06.184 INFO:teuthology.orchestra.run.vm06.stdout:ntpd: time slew +0.000777s
2026-04-15T13:31:06.204 INFO:teuthology.orchestra.run.vm06.stdout:     remote           refid      st t when poll reach   delay   offset  jitter
2026-04-15T13:31:06.204 INFO:teuthology.orchestra.run.vm06.stdout:==============================================================================
2026-04-15T13:31:06.204 INFO:teuthology.orchestra.run.vm06.stdout: 0.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-04-15T13:31:06.204 INFO:teuthology.orchestra.run.vm06.stdout: 1.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-04-15T13:31:06.204 INFO:teuthology.orchestra.run.vm06.stdout: 2.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-04-15T13:31:06.204 INFO:teuthology.orchestra.run.vm06.stdout: 3.ubuntu.pool.n .POOL.          16 p    -   64    0    0.000   +0.000   0.000
2026-04-15T13:31:06.204 INFO:teuthology.orchestra.run.vm06.stdout: ntp.ubuntu.com  .POOL.          16 p    -   64    0    0.000   +0.000   0.000
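The clock task's one-liner is a fallback chain that works whether a host runs ntp or chrony, without first probing which is installed: stop whichever daemon exists, step the clock once while it is down, restart it, then report peer status without ever failing the task. Annotated, the same commands read:

    sudo systemctl stop ntp.service || sudo systemctl stop ntpd.service || sudo systemctl stop chronyd.service
    sudo ntpd -gq || sudo chronyc makestep    # one-shot correction; ntpd -gq sets the clock and exits
    sudo systemctl start ntp.service || sudo systemctl start ntpd.service || sudo systemctl start chronyd.service
    PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true   # report peers, never fail

On these Ubuntu 22.04 VMs the ntp branch wins, and the one-shot correction is tiny (+0.000777 s on vm06, +0.017139 s on vm09), so the run starts with no meaningful clock skew.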
2026-04-15T13:31:06.205 INFO:teuthology.run_tasks:Running task nvme_loop...
2026-04-15T13:31:06.207 INFO:tasks.nvme_loop:Setting up nvme_loop on scratch devices...
2026-04-15T13:31:06.207 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-04-15T13:31:06.207 DEBUG:teuthology.orchestra.run.vm06:> dd if=/scratch_devs of=/dev/stdout
2026-04-15T13:31:06.248 DEBUG:teuthology.misc:devs=['/dev/vg_nvme/lv_1', '/dev/vg_nvme/lv_2', '/dev/vg_nvme/lv_3', '/dev/vg_nvme/lv_4']
2026-04-15T13:31:06.248 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vg_nvme/lv_1
2026-04-15T13:31:06.292 INFO:teuthology.orchestra.run.vm06.stdout:  File: /dev/vg_nvme/lv_1 -> ../dm-0
2026-04-15T13:31:06.292 INFO:teuthology.orchestra.run.vm06.stdout:  Size: 7          Blocks: 0          IO Block: 4096   symbolic link
2026-04-15T13:31:06.292 INFO:teuthology.orchestra.run.vm06.stdout:Device: 5h/5d      Inode: 791     Links: 1
2026-04-15T13:31:06.292 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0777/lrwxrwxrwx)  Uid: (    0/    root)   Gid: (    0/    root)
2026-04-15T13:31:06.292 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-04-15 13:30:48.517316000 +0000
2026-04-15T13:31:06.293 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-04-15 13:30:48.401316000 +0000
2026-04-15T13:31:06.293 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-04-15 13:30:48.401316000 +0000
2026-04-15T13:31:06.293 INFO:teuthology.orchestra.run.vm06.stdout: Birth: -
2026-04-15T13:31:06.293 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vg_nvme/lv_1 of=/dev/null count=1
2026-04-15T13:31:06.340 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in
2026-04-15T13:31:06.340 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out
2026-04-15T13:31:06.340 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000152265 s, 3.4 MB/s
2026-04-15T13:31:06.340 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_1
2026-04-15T13:31:06.385 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vg_nvme/lv_2
2026-04-15T13:31:06.433 INFO:teuthology.orchestra.run.vm06.stdout:  File: /dev/vg_nvme/lv_2 -> ../dm-1
2026-04-15T13:31:06.433 INFO:teuthology.orchestra.run.vm06.stdout:  Size: 7          Blocks: 0          IO Block: 4096   symbolic link
2026-04-15T13:31:06.433 INFO:teuthology.orchestra.run.vm06.stdout:Device: 5h/5d      Inode: 823     Links: 1
2026-04-15T13:31:06.433 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0777/lrwxrwxrwx)  Uid: (    0/    root)   Gid: (    0/    root)
2026-04-15T13:31:06.433 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-04-15 13:30:48.813316000 +0000
2026-04-15T13:31:06.433 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-04-15 13:30:48.677316000 +0000
2026-04-15T13:31:06.433 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-04-15 13:30:48.677316000 +0000
2026-04-15T13:31:06.433 INFO:teuthology.orchestra.run.vm06.stdout: Birth: -
2026-04-15T13:31:06.433 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vg_nvme/lv_2 of=/dev/null count=1
2026-04-15T13:31:06.480 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in
2026-04-15T13:31:06.480 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out
2026-04-15T13:31:06.480 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000134933 s, 3.8 MB/s
2026-04-15T13:31:06.481 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_2
2026-04-15T13:31:06.526 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vg_nvme/lv_3
2026-04-15T13:31:06.572 INFO:teuthology.orchestra.run.vm06.stdout:  File: /dev/vg_nvme/lv_3 -> ../dm-2
2026-04-15T13:31:06.572 INFO:teuthology.orchestra.run.vm06.stdout:  Size: 7          Blocks: 0          IO Block: 4096   symbolic link
2026-04-15T13:31:06.573 INFO:teuthology.orchestra.run.vm06.stdout:Device: 5h/5d      Inode: 854     Links: 1
2026-04-15T13:31:06.573 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0777/lrwxrwxrwx)  Uid: (    0/    root)   Gid: (    0/    root)
2026-04-15T13:31:06.573 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-04-15 13:30:49.101316000 +0000
2026-04-15T13:31:06.573 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-04-15 13:30:48.973316000 +0000
2026-04-15T13:31:06.573 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-04-15 13:30:48.973316000 +0000
2026-04-15T13:31:06.573 INFO:teuthology.orchestra.run.vm06.stdout: Birth: -
2026-04-15T13:31:06.573 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vg_nvme/lv_3 of=/dev/null count=1
2026-04-15T13:31:06.621 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in
2026-04-15T13:31:06.621 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out
2026-04-15T13:31:06.621 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000163937 s, 3.1 MB/s
2026-04-15T13:31:06.621 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_3
2026-04-15T13:31:06.670 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/vg_nvme/lv_4
2026-04-15T13:31:06.716 INFO:teuthology.orchestra.run.vm06.stdout:  File: /dev/vg_nvme/lv_4 -> ../dm-3
2026-04-15T13:31:06.716 INFO:teuthology.orchestra.run.vm06.stdout:  Size: 7          Blocks: 0          IO Block: 4096   symbolic link
2026-04-15T13:31:06.716 INFO:teuthology.orchestra.run.vm06.stdout:Device: 5h/5d      Inode: 889     Links: 1
2026-04-15T13:31:06.716 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0777/lrwxrwxrwx)  Uid: (    0/    root)   Gid: (    0/    root)
2026-04-15T13:31:06.716 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-04-15 13:30:52.953316000 +0000
2026-04-15T13:31:06.716 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-04-15 13:30:49.257316000 +0000
2026-04-15T13:31:06.716 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-04-15 13:30:49.257316000 +0000
2026-04-15T13:31:06.716 INFO:teuthology.orchestra.run.vm06.stdout: Birth: -
2026-04-15T13:31:06.716 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/vg_nvme/lv_4 of=/dev/null count=1
2026-04-15T13:31:06.765 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in
2026-04-15T13:31:06.765 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out
2026-04-15T13:31:06.765 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000137247 s, 3.7 MB/s
2026-04-15T13:31:06.766 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_4
2026-04-15T13:31:06.814 DEBUG:teuthology.orchestra.run.vm06:> sudo apt install -y linux-modules-extra-$(uname -r)
2026-04-15T13:31:06.867 INFO:teuthology.orchestra.run.vm06.stderr:
2026-04-15T13:31:06.868 INFO:teuthology.orchestra.run.vm06.stderr:WARNING: apt does not have a stable CLI interface. Use with caution in scripts.
2026-04-15T13:31:06.868 INFO:teuthology.orchestra.run.vm06.stderr:
2026-04-15T13:31:06.893 INFO:teuthology.orchestra.run.vm06.stdout:Reading package lists...
2026-04-15T13:31:07.079 INFO:teuthology.orchestra.run.vm06.stdout:Building dependency tree...
2026-04-15T13:31:07.079 INFO:teuthology.orchestra.run.vm06.stdout:Reading state information...
2026-04-15T13:31:07.228 INFO:teuthology.orchestra.run.vm06.stdout:The following packages were automatically installed and are no longer required:
2026-04-15T13:31:07.229 INFO:teuthology.orchestra.run.vm06.stdout:  kpartx libsgutils2-2 sg3-utils sg3-utils-udev
2026-04-15T13:31:07.229 INFO:teuthology.orchestra.run.vm06.stdout:Use 'sudo apt autoremove' to remove them.
2026-04-15T13:31:07.229 INFO:teuthology.orchestra.run.vm06.stdout:The following additional packages will be installed:
2026-04-15T13:31:07.229 INFO:teuthology.orchestra.run.vm06.stdout:  wireless-regdb
2026-04-15T13:31:07.277 INFO:teuthology.orchestra.run.vm06.stdout:The following NEW packages will be installed:
2026-04-15T13:31:07.277 INFO:teuthology.orchestra.run.vm06.stdout:  linux-modules-extra-5.15.0-171-generic wireless-regdb
2026-04-15T13:31:07.314 INFO:teuthology.orchestra.run.vm06.stdout:0 upgraded, 2 newly installed, 0 to remove and 60 not upgraded.
2026-04-15T13:31:07.425 INFO:teuthology.orchestra.run.vm06.stdout:Need to get 63.9 MB of archives.
2026-04-15T13:31:07.425 INFO:teuthology.orchestra.run.vm06.stdout:After this operation, 353 MB of additional disk space will be used.
2026-04-15T13:31:07.425 INFO:teuthology.orchestra.run.vm06.stdout:Get:1 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 wireless-regdb all 2025.10.07-0ubuntu1~22.04.1 [10.1 kB]
2026-04-15T13:31:07.466 INFO:teuthology.orchestra.run.vm06.stdout:Get:2 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 linux-modules-extra-5.15.0-171-generic amd64 5.15.0-171.181 [63.9 MB]
2026-04-15T13:31:09.794 INFO:teuthology.orchestra.run.vm06.stderr:debconf: unable to initialize frontend: Dialog
2026-04-15T13:31:09.794 INFO:teuthology.orchestra.run.vm06.stderr:debconf: (Dialog frontend will not work on a dumb terminal, an emacs shell buffer, or without a controlling terminal.)
2026-04-15T13:31:09.794 INFO:teuthology.orchestra.run.vm06.stderr:debconf: falling back to frontend: Readline
2026-04-15T13:31:09.799 INFO:teuthology.orchestra.run.vm06.stderr:debconf: unable to initialize frontend: Readline
2026-04-15T13:31:09.799 INFO:teuthology.orchestra.run.vm06.stderr:debconf: (This frontend requires a controlling tty.)
2026-04-15T13:31:09.799 INFO:teuthology.orchestra.run.vm06.stderr:debconf: falling back to frontend: Teletype
2026-04-15T13:31:09.801 INFO:teuthology.orchestra.run.vm06.stderr:dpkg-preconfigure: unable to re-open stdin:
2026-04-15T13:31:09.825 INFO:teuthology.orchestra.run.vm06.stdout:Fetched 63.9 MB in 2s (27.6 MB/s)
2026-04-15T13:31:09.895 INFO:teuthology.orchestra.run.vm06.stdout:Selecting previously unselected package wireless-regdb.
2026-04-15T13:31:09.928 INFO:teuthology.orchestra.run.vm06.stdout:(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 119267 files and directories currently installed.)
2026-04-15T13:31:09.931 INFO:teuthology.orchestra.run.vm06.stdout:Preparing to unpack .../wireless-regdb_2025.10.07-0ubuntu1~22.04.1_all.deb ...
2026-04-15T13:31:09.932 INFO:teuthology.orchestra.run.vm06.stdout:Unpacking wireless-regdb (2025.10.07-0ubuntu1~22.04.1) ...
2026-04-15T13:31:09.950 INFO:teuthology.orchestra.run.vm06.stdout:Selecting previously unselected package linux-modules-extra-5.15.0-171-generic.
2026-04-15T13:31:09.956 INFO:teuthology.orchestra.run.vm06.stdout:Preparing to unpack .../linux-modules-extra-5.15.0-171-generic_5.15.0-171.181_amd64.deb ...
2026-04-15T13:31:09.957 INFO:teuthology.orchestra.run.vm06.stdout:Unpacking linux-modules-extra-5.15.0-171-generic (5.15.0-171.181) ...
2026-04-15T13:31:11.601 INFO:teuthology.orchestra.run.vm06.stdout:Setting up wireless-regdb (2025.10.07-0ubuntu1~22.04.1) ...
2026-04-15T13:31:11.603 INFO:teuthology.orchestra.run.vm06.stdout:Setting up linux-modules-extra-5.15.0-171-generic (5.15.0-171.181) ...
2026-04-15T13:31:13.080 INFO:teuthology.orchestra.run.vm06.stdout:Processing triggers for man-db (2.10.2-1) ...
2026-04-15T13:31:13.334 INFO:teuthology.orchestra.run.vm06.stdout:Processing triggers for linux-image-5.15.0-171-generic (5.15.0-171.181) ...
2026-04-15T13:31:13.339 INFO:teuthology.orchestra.run.vm06.stdout:/etc/kernel/postinst.d/initramfs-tools:
2026-04-15T13:31:13.339 INFO:teuthology.orchestra.run.vm06.stdout:update-initramfs: Generating /boot/initrd.img-5.15.0-171-generic
2026-04-15T13:31:22.298 INFO:teuthology.orchestra.run.vm06.stdout:/etc/kernel/postinst.d/zz-update-grub:
2026-04-15T13:31:22.298 INFO:teuthology.orchestra.run.vm06.stdout:Sourcing file `/etc/default/grub'
2026-04-15T13:31:22.317 INFO:teuthology.orchestra.run.vm06.stdout:Sourcing file `/etc/default/grub.d/50-cloudimg-settings.cfg'
2026-04-15T13:31:22.318 INFO:teuthology.orchestra.run.vm06.stdout:Sourcing file `/etc/default/grub.d/init-select.cfg'
2026-04-15T13:31:22.319 INFO:teuthology.orchestra.run.vm06.stdout:Generating grub configuration file ...
2026-04-15T13:31:22.393 INFO:teuthology.orchestra.run.vm06.stdout:Found linux image: /boot/vmlinuz-5.15.0-171-generic
2026-04-15T13:31:22.398 INFO:teuthology.orchestra.run.vm06.stdout:Found initrd image: /boot/initrd.img-5.15.0-171-generic
2026-04-15T13:31:22.588 INFO:teuthology.orchestra.run.vm06.stdout:Warning: os-prober will not be executed to detect other bootable partitions.
2026-04-15T13:31:22.588 INFO:teuthology.orchestra.run.vm06.stdout:Systems on them will not be added to the GRUB boot configuration.
2026-04-15T13:31:22.588 INFO:teuthology.orchestra.run.vm06.stdout:Check GRUB_DISABLE_OS_PROBER documentation entry.
2026-04-15T13:31:22.595 INFO:teuthology.orchestra.run.vm06.stdout:done
2026-04-15T13:31:22.841 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:31:22.841 INFO:teuthology.orchestra.run.vm06.stdout:Running kernel seems to be up-to-date.
2026-04-15T13:31:22.841 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:31:22.841 INFO:teuthology.orchestra.run.vm06.stdout:Services to be restarted:
2026-04-15T13:31:22.844 INFO:teuthology.orchestra.run.vm06.stdout: systemctl restart apache-htcacheclean.service
2026-04-15T13:31:22.850 INFO:teuthology.orchestra.run.vm06.stdout: systemctl restart rsyslog.service
2026-04-15T13:31:22.853 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:31:22.854 INFO:teuthology.orchestra.run.vm06.stdout:Service restarts being deferred:
2026-04-15T13:31:22.854 INFO:teuthology.orchestra.run.vm06.stdout: systemctl restart networkd-dispatcher.service
2026-04-15T13:31:22.854 INFO:teuthology.orchestra.run.vm06.stdout: systemctl restart unattended-upgrades.service
2026-04-15T13:31:22.854 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:31:22.854 INFO:teuthology.orchestra.run.vm06.stdout:No containers need to be restarted.
2026-04-15T13:31:22.854 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:31:22.854 INFO:teuthology.orchestra.run.vm06.stdout:No user sessions are running outdated binaries.
2026-04-15T13:31:22.854 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:31:22.854 INFO:teuthology.orchestra.run.vm06.stdout:No VM guests are running outdated hypervisor (qemu) binaries on this host.
2026-04-15T13:31:23.957 DEBUG:teuthology.orchestra.run.vm06:> sudo apt install -y nvme-cli
2026-04-15T13:31:24.008 INFO:teuthology.orchestra.run.vm06.stderr:
2026-04-15T13:31:24.008 INFO:teuthology.orchestra.run.vm06.stderr:WARNING: apt does not have a stable CLI interface. Use with caution in scripts.
2026-04-15T13:31:24.008 INFO:teuthology.orchestra.run.vm06.stderr:
2026-04-15T13:31:24.037 INFO:teuthology.orchestra.run.vm06.stdout:Reading package lists...
2026-04-15T13:31:24.235 INFO:teuthology.orchestra.run.vm06.stdout:Building dependency tree...
2026-04-15T13:31:24.236 INFO:teuthology.orchestra.run.vm06.stdout:Reading state information...
2026-04-15T13:31:24.397 INFO:teuthology.orchestra.run.vm06.stdout:The following packages were automatically installed and are no longer required:
2026-04-15T13:31:24.398 INFO:teuthology.orchestra.run.vm06.stdout:  kpartx libsgutils2-2 sg3-utils sg3-utils-udev
2026-04-15T13:31:24.398 INFO:teuthology.orchestra.run.vm06.stdout:Use 'sudo apt autoremove' to remove them.
2026-04-15T13:31:24.448 INFO:teuthology.orchestra.run.vm06.stdout:The following NEW packages will be installed:
2026-04-15T13:31:24.448 INFO:teuthology.orchestra.run.vm06.stdout:  nvme-cli
2026-04-15T13:31:24.482 INFO:teuthology.orchestra.run.vm06.stdout:0 upgraded, 1 newly installed, 0 to remove and 60 not upgraded.
2026-04-15T13:31:24.648 INFO:teuthology.orchestra.run.vm06.stdout:Need to get 474 kB of archives.
2026-04-15T13:31:24.648 INFO:teuthology.orchestra.run.vm06.stdout:After this operation, 1136 kB of additional disk space will be used.
2026-04-15T13:31:24.648 INFO:teuthology.orchestra.run.vm06.stdout:Get:1 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 nvme-cli amd64 1.16-3ubuntu0.3 [474 kB]
2026-04-15T13:31:25.639 INFO:teuthology.orchestra.run.vm06.stderr:debconf: unable to initialize frontend: Dialog
2026-04-15T13:31:25.639 INFO:teuthology.orchestra.run.vm06.stderr:debconf: (Dialog frontend will not work on a dumb terminal, an emacs shell buffer, or without a controlling terminal.)
2026-04-15T13:31:25.639 INFO:teuthology.orchestra.run.vm06.stderr:debconf: falling back to frontend: Readline
2026-04-15T13:31:25.643 INFO:teuthology.orchestra.run.vm06.stderr:debconf: unable to initialize frontend: Readline
2026-04-15T13:31:25.643 INFO:teuthology.orchestra.run.vm06.stderr:debconf: (This frontend requires a controlling tty.)
2026-04-15T13:31:25.643 INFO:teuthology.orchestra.run.vm06.stderr:debconf: falling back to frontend: Teletype
2026-04-15T13:31:25.645 INFO:teuthology.orchestra.run.vm06.stderr:dpkg-preconfigure: unable to re-open stdin:
2026-04-15T13:31:25.668 INFO:teuthology.orchestra.run.vm06.stdout:Fetched 474 kB in 1s (467 kB/s)
2026-04-15T13:31:25.682 INFO:teuthology.orchestra.run.vm06.stdout:Selecting previously unselected package nvme-cli.
2026-04-15T13:31:25.711 INFO:teuthology.orchestra.run.vm06.stdout:(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 125173 files and directories currently installed.)
2026-04-15T13:31:25.713 INFO:teuthology.orchestra.run.vm06.stdout:Preparing to unpack .../nvme-cli_1.16-3ubuntu0.3_amd64.deb ...
2026-04-15T13:31:25.714 INFO:teuthology.orchestra.run.vm06.stdout:Unpacking nvme-cli (1.16-3ubuntu0.3) ...
2026-04-15T13:31:25.766 INFO:teuthology.orchestra.run.vm06.stdout:Setting up nvme-cli (1.16-3ubuntu0.3) ...
2026-04-15T13:31:25.828 INFO:teuthology.orchestra.run.vm06.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /lib/systemd/system/nvmefc-boot-connections.service.
2026-04-15T13:31:26.041 INFO:teuthology.orchestra.run.vm06.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmf-autoconnect.service → /lib/systemd/system/nvmf-autoconnect.service.
2026-04-15T13:31:26.386 INFO:teuthology.orchestra.run.vm06.stdout:nvmf-connect.target is a disabled or a static unit, not starting it.
2026-04-15T13:31:26.401 INFO:teuthology.orchestra.run.vm06.stdout:Processing triggers for man-db (2.10.2-1) ...
2026-04-15T13:31:26.701 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:31:26.701 INFO:teuthology.orchestra.run.vm06.stdout:Running kernel seems to be up-to-date.
2026-04-15T13:31:26.701 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:31:26.701 INFO:teuthology.orchestra.run.vm06.stdout:Services to be restarted: 2026-04-15T13:31:26.704 INFO:teuthology.orchestra.run.vm06.stdout: systemctl restart apache-htcacheclean.service 2026-04-15T13:31:26.710 INFO:teuthology.orchestra.run.vm06.stdout: systemctl restart rsyslog.service 2026-04-15T13:31:26.713 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:31:26.713 INFO:teuthology.orchestra.run.vm06.stdout:Service restarts being deferred: 2026-04-15T13:31:26.713 INFO:teuthology.orchestra.run.vm06.stdout: systemctl restart networkd-dispatcher.service 2026-04-15T13:31:26.713 INFO:teuthology.orchestra.run.vm06.stdout: systemctl restart unattended-upgrades.service 2026-04-15T13:31:26.713 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:31:26.713 INFO:teuthology.orchestra.run.vm06.stdout:No containers need to be restarted. 2026-04-15T13:31:26.713 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:31:26.713 INFO:teuthology.orchestra.run.vm06.stdout:No user sessions are running outdated binaries. 2026-04-15T13:31:26.713 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:31:26.713 INFO:teuthology.orchestra.run.vm06.stdout:No VM guests are running outdated hypervisor (qemu) binaries on this host. 2026-04-15T13:31:27.819 DEBUG:teuthology.orchestra.run.vm06:> grep '^nvme_loop' /proc/modules || sudo modprobe nvme_loop && sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn && sudo mkdir -p /sys/kernel/config/nvmet/ports/1 && echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype 2026-04-15T13:31:27.891 INFO:teuthology.orchestra.run.vm06.stdout:loop 2026-04-15T13:31:27.891 INFO:tasks.nvme_loop:Connecting nvme_loop vm06:/dev/vg_nvme/lv_1... 2026-04-15T13:31:27.891 DEBUG:teuthology.orchestra.run.vm06:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn 2026-04-15T13:31:27.945 INFO:teuthology.orchestra.run.vm06.stdout:1 2026-04-15T13:31:27.960 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vg_nvme/lv_11 2026-04-15T13:31:27.973 INFO:tasks.nvme_loop:Connecting nvme_loop vm06:/dev/vg_nvme/lv_2... 2026-04-15T13:31:27.973 DEBUG:teuthology.orchestra.run.vm06:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_2 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1 && echo -n /dev/vg_nvme/lv_2 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_2 /sys/kernel/config/nvmet/ports/1/subsystems/lv_2 && sudo nvme connect -t loop -n lv_2 -q hostnqn 2026-04-15T13:31:28.026 INFO:teuthology.orchestra.run.vm06.stdout:1 2026-04-15T13:31:28.039 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vg_nvme/lv_21 2026-04-15T13:31:28.051 INFO:tasks.nvme_loop:Connecting nvme_loop vm06:/dev/vg_nvme/lv_3... 
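Note: each "Connecting nvme_loop" step above drives the kernel NVMe target through configfs: create a subsystem named after the LV, allow any host to connect, back namespace 1 with the logical volume, enable it, link the subsystem into loop port 1, and attach it locally with nvme connect. The odd stdout lines "/dev/vg_nvme/lv_11" and "/dev/vg_nvme/lv_21" are not device names: echo -n writes the device path to tee without a trailing newline, so the following tee of "1" into .../enable is fused onto it in the captured output. A condensed restatement of the per-LV sequence (same commands as the log, wrapped in a loop for readability):

    # Condensed sketch of the per-LV commands run on vm06 above.
    for lv in lv_1 lv_2 lv_3 lv_4; do
      sudo mkdir -p /sys/kernel/config/nvmet/subsystems/$lv
      echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/$lv/attr_allow_any_host
      sudo mkdir -p /sys/kernel/config/nvmet/subsystems/$lv/namespaces/1
      # echo -n: no trailing newline, hence the fused ".../lv_11" lines in the log
      echo -n /dev/vg_nvme/$lv | sudo tee /sys/kernel/config/nvmet/subsystems/$lv/namespaces/1/device_path
      echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/$lv/namespaces/1/enable
      sudo ln -s /sys/kernel/config/nvmet/subsystems/$lv /sys/kernel/config/nvmet/ports/1/subsystems/$lv
      sudo nvme connect -t loop -n $lv -q hostnqn   # surfaces the LV as /dev/nvmeXn1
    done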
2026-04-15T13:31:28.051 DEBUG:teuthology.orchestra.run.vm06:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_3 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1 && echo -n /dev/vg_nvme/lv_3 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_3 /sys/kernel/config/nvmet/ports/1/subsystems/lv_3 && sudo nvme connect -t loop -n lv_3 -q hostnqn 2026-04-15T13:31:28.105 INFO:teuthology.orchestra.run.vm06.stdout:1 2026-04-15T13:31:28.119 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vg_nvme/lv_31 2026-04-15T13:31:28.132 INFO:tasks.nvme_loop:Connecting nvme_loop vm06:/dev/vg_nvme/lv_4... 2026-04-15T13:31:28.132 DEBUG:teuthology.orchestra.run.vm06:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_4 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1 && echo -n /dev/vg_nvme/lv_4 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_4 /sys/kernel/config/nvmet/ports/1/subsystems/lv_4 && sudo nvme connect -t loop -n lv_4 -q hostnqn 2026-04-15T13:31:28.188 INFO:teuthology.orchestra.run.vm06.stdout:1 2026-04-15T13:31:28.204 INFO:teuthology.orchestra.run.vm06.stdout:/dev/vg_nvme/lv_41 2026-04-15T13:31:28.218 DEBUG:teuthology.orchestra.run.vm06:> lsblk 2026-04-15T13:31:28.264 INFO:teuthology.orchestra.run.vm06.stdout:NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS 2026-04-15T13:31:28.264 INFO:teuthology.orchestra.run.vm06.stdout:loop0 7:0 0 63.8M 1 loop /snap/core20/2717 2026-04-15T13:31:28.264 INFO:teuthology.orchestra.run.vm06.stdout:loop1 7:1 0 91.6M 1 loop /snap/lxd/37982 2026-04-15T13:31:28.264 INFO:teuthology.orchestra.run.vm06.stdout:loop2 7:2 0 48.1M 1 loop /snap/snapd/25935 2026-04-15T13:31:28.264 INFO:teuthology.orchestra.run.vm06.stdout:sr0 11:0 1 366K 0 rom 2026-04-15T13:31:28.264 INFO:teuthology.orchestra.run.vm06.stdout:vda 252:0 0 40G 0 disk 2026-04-15T13:31:28.264 INFO:teuthology.orchestra.run.vm06.stdout:├─vda1 252:1 0 39.9G 0 part / 2026-04-15T13:31:28.264 INFO:teuthology.orchestra.run.vm06.stdout:├─vda14 252:14 0 4M 0 part 2026-04-15T13:31:28.264 INFO:teuthology.orchestra.run.vm06.stdout:└─vda15 252:15 0 106M 0 part /boot/efi 2026-04-15T13:31:28.264 INFO:teuthology.orchestra.run.vm06.stdout:vdb 252:16 0 20G 0 disk 2026-04-15T13:31:28.264 INFO:teuthology.orchestra.run.vm06.stdout:└─vg_nvme-lv_1 253:0 0 20G 0 lvm 2026-04-15T13:31:28.264 INFO:teuthology.orchestra.run.vm06.stdout:vdc 252:32 0 20G 0 disk 2026-04-15T13:31:28.264 INFO:teuthology.orchestra.run.vm06.stdout:└─vg_nvme-lv_2 253:1 0 20G 0 lvm 2026-04-15T13:31:28.264 INFO:teuthology.orchestra.run.vm06.stdout:vdd 252:48 0 20G 0 disk 2026-04-15T13:31:28.264 INFO:teuthology.orchestra.run.vm06.stdout:└─vg_nvme-lv_3 253:2 0 20G 0 lvm 2026-04-15T13:31:28.264 INFO:teuthology.orchestra.run.vm06.stdout:vde 252:64 0 20G 0 disk 2026-04-15T13:31:28.264 INFO:teuthology.orchestra.run.vm06.stdout:└─vg_nvme-lv_4 253:3 0 20G 0 lvm 2026-04-15T13:31:28.264 INFO:teuthology.orchestra.run.vm06.stdout:nvme0n1 259:1 0 20G 0 disk 2026-04-15T13:31:28.264 INFO:teuthology.orchestra.run.vm06.stdout:nvme1n1 259:3 0 20G 0 disk 2026-04-15T13:31:28.264 
INFO:teuthology.orchestra.run.vm06.stdout:nvme2n1 259:5 0 20G 0 disk 2026-04-15T13:31:28.264 INFO:teuthology.orchestra.run.vm06.stdout:nvme3n1 259:7 0 20G 0 disk 2026-04-15T13:31:28.265 DEBUG:teuthology.orchestra.run.vm06:> sudo nvme list -o json 2026-04-15T13:31:28.318 INFO:teuthology.orchestra.run.vm06.stdout:{ 2026-04-15T13:31:28.318 INFO:teuthology.orchestra.run.vm06.stdout: "Devices" : [ 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: { 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "NameSpace" : 1, 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "DevicePath" : "/dev/nvme0n1", 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "Firmware" : "5.15.0-1", 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "Index" : 0, 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "ModelNumber" : "Linux", 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "SerialNumber" : "c12cb9df1acb8205e4f3", 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "UsedBytes" : 21470642176, 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "MaximumLBA" : 41934848, 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "PhysicalSize" : 21470642176, 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "SectorSize" : 512 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: }, 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: { 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "NameSpace" : 1, 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "DevicePath" : "/dev/nvme1n1", 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "Firmware" : "5.15.0-1", 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "Index" : 1, 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "ModelNumber" : "Linux", 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "SerialNumber" : "f4056127debabf608d65", 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "UsedBytes" : 21470642176, 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "MaximumLBA" : 41934848, 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "PhysicalSize" : 21470642176, 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "SectorSize" : 512 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: }, 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: { 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "NameSpace" : 1, 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "DevicePath" : "/dev/nvme2n1", 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "Firmware" : "5.15.0-1", 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "Index" : 2, 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "ModelNumber" : "Linux", 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "SerialNumber" : "c827407f82fbaf83f886", 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "UsedBytes" : 21470642176, 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "MaximumLBA" : 41934848, 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "PhysicalSize" : 21470642176, 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "SectorSize" : 512 2026-04-15T13:31:28.319 
INFO:teuthology.orchestra.run.vm06.stdout: }, 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: { 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "NameSpace" : 1, 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "DevicePath" : "/dev/nvme3n1", 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "Firmware" : "5.15.0-1", 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "Index" : 3, 2026-04-15T13:31:28.319 INFO:teuthology.orchestra.run.vm06.stdout: "ModelNumber" : "Linux", 2026-04-15T13:31:28.320 INFO:teuthology.orchestra.run.vm06.stdout: "SerialNumber" : "81c0ecdf70e1ab991990", 2026-04-15T13:31:28.320 INFO:teuthology.orchestra.run.vm06.stdout: "UsedBytes" : 21470642176, 2026-04-15T13:31:28.320 INFO:teuthology.orchestra.run.vm06.stdout: "MaximumLBA" : 41934848, 2026-04-15T13:31:28.320 INFO:teuthology.orchestra.run.vm06.stdout: "PhysicalSize" : 21470642176, 2026-04-15T13:31:28.320 INFO:teuthology.orchestra.run.vm06.stdout: "SectorSize" : 512 2026-04-15T13:31:28.320 INFO:teuthology.orchestra.run.vm06.stdout: } 2026-04-15T13:31:28.320 INFO:teuthology.orchestra.run.vm06.stdout: ] 2026-04-15T13:31:28.320 INFO:teuthology.orchestra.run.vm06.stdout:} 2026-04-15T13:31:28.320 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=0 bs=1 count=4096 2026-04-15T13:31:28.378 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-04-15T13:31:28.378 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-04-15T13:31:28.378 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00400054 s, 1.0 MB/s 2026-04-15T13:31:28.379 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s0 /dev/nvme0n1 2026-04-15T13:31:28.431 INFO:teuthology.orchestra.run.vm06.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:28.431 INFO:teuthology.orchestra.run.vm06.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:28.431 INFO:teuthology.orchestra.run.vm06.stdout:00000016 2026-04-15T13:31:28.432 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=1073741824 bs=1 count=4096 2026-04-15T13:31:28.486 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-04-15T13:31:28.486 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-04-15T13:31:28.486 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00340258 s, 1.2 MB/s 2026-04-15T13:31:28.486 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s1073741824 /dev/nvme0n1 2026-04-15T13:31:28.535 INFO:teuthology.orchestra.run.vm06.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:28.535 INFO:teuthology.orchestra.run.vm06.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:28.535 INFO:teuthology.orchestra.run.vm06.stdout:40000016 2026-04-15T13:31:28.536 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=10737418240 bs=1 count=4096 2026-04-15T13:31:28.590 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-04-15T13:31:28.590 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-04-15T13:31:28.590 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00474844 s, 863 kB/s 2026-04-15T13:31:28.591 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s10737418240 /dev/nvme0n1 2026-04-15T13:31:28.641 INFO:teuthology.orchestra.run.vm06.stdout:280000000 00 00 
00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:28.641 INFO:teuthology.orchestra.run.vm06.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:28.641 INFO:teuthology.orchestra.run.vm06.stdout:280000016 2026-04-15T13:31:28.642 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=0 bs=1 count=4096 2026-04-15T13:31:28.694 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-04-15T13:31:28.694 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-04-15T13:31:28.694 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00480117 s, 853 kB/s 2026-04-15T13:31:28.695 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s0 /dev/nvme1n1 2026-04-15T13:31:28.750 INFO:teuthology.orchestra.run.vm06.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:28.750 INFO:teuthology.orchestra.run.vm06.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:28.750 INFO:teuthology.orchestra.run.vm06.stdout:00000016 2026-04-15T13:31:28.751 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=1073741824 bs=1 count=4096 2026-04-15T13:31:28.805 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-04-15T13:31:28.805 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-04-15T13:31:28.805 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00343254 s, 1.2 MB/s 2026-04-15T13:31:28.806 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s1073741824 /dev/nvme1n1 2026-04-15T13:31:28.853 INFO:teuthology.orchestra.run.vm06.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:28.853 INFO:teuthology.orchestra.run.vm06.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:28.853 INFO:teuthology.orchestra.run.vm06.stdout:40000016 2026-04-15T13:31:28.854 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=10737418240 bs=1 count=4096 2026-04-15T13:31:28.908 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-04-15T13:31:28.909 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-04-15T13:31:28.909 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.0035187 s, 1.2 MB/s 2026-04-15T13:31:28.909 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s10737418240 /dev/nvme1n1 2026-04-15T13:31:28.957 INFO:teuthology.orchestra.run.vm06.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:28.957 INFO:teuthology.orchestra.run.vm06.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:28.957 INFO:teuthology.orchestra.run.vm06.stdout:280000016 2026-04-15T13:31:28.958 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=0 bs=1 count=4096 2026-04-15T13:31:29.009 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-04-15T13:31:29.009 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-04-15T13:31:29.009 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00344921 s, 1.2 MB/s 2026-04-15T13:31:29.010 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s0 /dev/nvme2n1 2026-04-15T13:31:29.057 INFO:teuthology.orchestra.run.vm06.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:29.057 INFO:teuthology.orchestra.run.vm06.stdout:00000010 00 00 00 00 00 00 |......| 
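Note: the dd/hexdump pairs in this stretch are a wipe-and-verify pass over each newly attached /dev/nvmeXn1: 4096 bytes are zeroed at byte offsets 0, 1073741824 (1 GiB) and 10737418240 (10 GiB), then hexdump reads 22 bytes back at the same offset to confirm zeros. Because bs=1 sets the output block size to one byte, seek counts bytes (so seek=1073741824 lands exactly at 1 GiB) and dd issues 4096 single-byte writes, which is why throughput reports as roughly 1 MB/s. The same pattern, factored into a sketch:

    # Illustrative restatement of the wipe-and-verify pattern in the log.
    wipe_check() {  # usage: wipe_check <device> <byte-offset>
      sudo dd if=/dev/zero of="$1" seek="$2" bs=1 count=4096
      sudo hexdump -n22 -C -s"$2" "$1"   # expect all-zero bytes at the offset
    }
    for off in 0 1073741824 10737418240; do
      wipe_check /dev/nvme0n1 "$off"
    done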
2026-04-15T13:31:29.057 INFO:teuthology.orchestra.run.vm06.stdout:00000016 2026-04-15T13:31:29.058 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=1073741824 bs=1 count=4096 2026-04-15T13:31:29.109 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-04-15T13:31:29.109 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-04-15T13:31:29.109 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00397831 s, 1.0 MB/s 2026-04-15T13:31:29.110 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s1073741824 /dev/nvme2n1 2026-04-15T13:31:29.158 INFO:teuthology.orchestra.run.vm06.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:29.158 INFO:teuthology.orchestra.run.vm06.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:29.158 INFO:teuthology.orchestra.run.vm06.stdout:40000016 2026-04-15T13:31:29.159 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=10737418240 bs=1 count=4096 2026-04-15T13:31:29.213 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-04-15T13:31:29.213 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-04-15T13:31:29.213 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.0035833 s, 1.1 MB/s 2026-04-15T13:31:29.215 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s10737418240 /dev/nvme2n1 2026-04-15T13:31:29.266 INFO:teuthology.orchestra.run.vm06.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:29.266 INFO:teuthology.orchestra.run.vm06.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:29.266 INFO:teuthology.orchestra.run.vm06.stdout:280000016 2026-04-15T13:31:29.267 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=0 bs=1 count=4096 2026-04-15T13:31:29.320 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-04-15T13:31:29.320 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-04-15T13:31:29.320 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00342807 s, 1.2 MB/s 2026-04-15T13:31:29.321 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s0 /dev/nvme3n1 2026-04-15T13:31:29.369 INFO:teuthology.orchestra.run.vm06.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:29.369 INFO:teuthology.orchestra.run.vm06.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:29.369 INFO:teuthology.orchestra.run.vm06.stdout:00000016 2026-04-15T13:31:29.370 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=1073741824 bs=1 count=4096 2026-04-15T13:31:29.421 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-04-15T13:31:29.421 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-04-15T13:31:29.421 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00404079 s, 1.0 MB/s 2026-04-15T13:31:29.422 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s1073741824 /dev/nvme3n1 2026-04-15T13:31:29.469 INFO:teuthology.orchestra.run.vm06.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:29.469 INFO:teuthology.orchestra.run.vm06.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:29.469 INFO:teuthology.orchestra.run.vm06.stdout:40000016 2026-04-15T13:31:29.470 DEBUG:teuthology.orchestra.run.vm06:> sudo dd 
if=/dev/zero of=/dev/nvme3n1 seek=10737418240 bs=1 count=4096 2026-04-15T13:31:29.522 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records in 2026-04-15T13:31:29.522 INFO:teuthology.orchestra.run.vm06.stderr:4096+0 records out 2026-04-15T13:31:29.522 INFO:teuthology.orchestra.run.vm06.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00351765 s, 1.2 MB/s 2026-04-15T13:31:29.524 DEBUG:teuthology.orchestra.run.vm06:> sudo hexdump -n22 -C -s10737418240 /dev/nvme3n1 2026-04-15T13:31:29.574 INFO:teuthology.orchestra.run.vm06.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:29.574 INFO:teuthology.orchestra.run.vm06.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:29.574 INFO:teuthology.orchestra.run.vm06.stdout:280000016 2026-04-15T13:31:29.575 INFO:tasks.nvme_loop:new_devs ['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1'] 2026-04-15T13:31:29.575 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-04-15T13:31:29.575 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/scratch_devs 2026-04-15T13:31:29.627 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-04-15T13:31:29.627 DEBUG:teuthology.orchestra.run.vm09:> dd if=/scratch_devs of=/dev/stdout 2026-04-15T13:31:29.631 DEBUG:teuthology.misc:devs=['/dev/vg_nvme/lv_1', '/dev/vg_nvme/lv_2', '/dev/vg_nvme/lv_3', '/dev/vg_nvme/lv_4'] 2026-04-15T13:31:29.631 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vg_nvme/lv_1 2026-04-15T13:31:29.677 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/vg_nvme/lv_1 -> ../dm-0 2026-04-15T13:31:29.677 INFO:teuthology.orchestra.run.vm09.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link 2026-04-15T13:31:29.677 INFO:teuthology.orchestra.run.vm09.stdout:Device: 5h/5d Inode: 790 Links: 1 2026-04-15T13:31:29.677 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root) 2026-04-15T13:31:29.677 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-04-15 13:30:14.333123466 +0000 2026-04-15T13:31:29.677 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-04-15 13:30:14.209061466 +0000 2026-04-15T13:31:29.677 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-04-15 13:30:14.209061466 +0000 2026-04-15T13:31:29.677 INFO:teuthology.orchestra.run.vm09.stdout: Birth: - 2026-04-15T13:31:29.677 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vg_nvme/lv_1 of=/dev/null count=1 2026-04-15T13:31:29.725 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-04-15T13:31:29.725 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-04-15T13:31:29.725 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000196216 s, 2.6 MB/s 2026-04-15T13:31:29.725 DEBUG:teuthology.orchestra.run.vm09:> ! 
mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_1 2026-04-15T13:31:29.770 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vg_nvme/lv_2 2026-04-15T13:31:29.816 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/vg_nvme/lv_2 -> ../dm-1 2026-04-15T13:31:29.816 INFO:teuthology.orchestra.run.vm09.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link 2026-04-15T13:31:29.816 INFO:teuthology.orchestra.run.vm09.stdout:Device: 5h/5d Inode: 822 Links: 1 2026-04-15T13:31:29.816 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root) 2026-04-15T13:31:29.816 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-04-15 13:30:14.657285466 +0000 2026-04-15T13:31:29.816 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-04-15 13:30:14.521217465 +0000 2026-04-15T13:31:29.816 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-04-15 13:30:14.521217465 +0000 2026-04-15T13:31:29.816 INFO:teuthology.orchestra.run.vm09.stdout: Birth: - 2026-04-15T13:31:29.816 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vg_nvme/lv_2 of=/dev/null count=1 2026-04-15T13:31:29.868 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-04-15T13:31:29.868 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-04-15T13:31:29.868 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000158464 s, 3.2 MB/s 2026-04-15T13:31:29.869 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_2 2026-04-15T13:31:29.913 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vg_nvme/lv_3 2026-04-15T13:31:29.961 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/vg_nvme/lv_3 -> ../dm-2 2026-04-15T13:31:29.961 INFO:teuthology.orchestra.run.vm09.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link 2026-04-15T13:31:29.961 INFO:teuthology.orchestra.run.vm09.stdout:Device: 5h/5d Inode: 855 Links: 1 2026-04-15T13:31:29.961 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root) 2026-04-15T13:31:29.961 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-04-15 13:30:14.957435465 +0000 2026-04-15T13:31:29.961 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-04-15 13:30:14.817365466 +0000 2026-04-15T13:31:29.961 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-04-15 13:30:14.817365466 +0000 2026-04-15T13:31:29.961 INFO:teuthology.orchestra.run.vm09.stdout: Birth: - 2026-04-15T13:31:29.961 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vg_nvme/lv_3 of=/dev/null count=1 2026-04-15T13:31:30.008 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-04-15T13:31:30.008 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-04-15T13:31:30.008 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000189419 s, 2.7 MB/s 2026-04-15T13:31:30.009 DEBUG:teuthology.orchestra.run.vm09:> ! 
mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_3 2026-04-15T13:31:30.053 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/vg_nvme/lv_4 2026-04-15T13:31:30.100 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/vg_nvme/lv_4 -> ../dm-3 2026-04-15T13:31:30.100 INFO:teuthology.orchestra.run.vm09.stdout: Size: 7 Blocks: 0 IO Block: 4096 symbolic link 2026-04-15T13:31:30.100 INFO:teuthology.orchestra.run.vm09.stdout:Device: 5h/5d Inode: 888 Links: 1 2026-04-15T13:31:30.100 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0777/lrwxrwxrwx) Uid: ( 0/ root) Gid: ( 0/ root) 2026-04-15T13:31:30.100 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-04-15 13:30:52.904399466 +0000 2026-04-15T13:31:30.100 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-04-15 13:30:15.117515466 +0000 2026-04-15T13:31:30.100 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-04-15 13:30:15.117515466 +0000 2026-04-15T13:31:30.101 INFO:teuthology.orchestra.run.vm09.stdout: Birth: - 2026-04-15T13:31:30.101 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/vg_nvme/lv_4 of=/dev/null count=1 2026-04-15T13:31:30.148 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in 2026-04-15T13:31:30.148 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out 2026-04-15T13:31:30.148 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000182041 s, 2.8 MB/s 2026-04-15T13:31:30.149 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/vg_nvme/lv_4 2026-04-15T13:31:30.192 DEBUG:teuthology.orchestra.run.vm09:> sudo apt install -y linux-modules-extra-$(uname -r) 2026-04-15T13:31:30.242 INFO:teuthology.orchestra.run.vm09.stderr: 2026-04-15T13:31:30.242 INFO:teuthology.orchestra.run.vm09.stderr:WARNING: apt does not have a stable CLI interface. Use with caution in scripts. 2026-04-15T13:31:30.242 INFO:teuthology.orchestra.run.vm09.stderr: 2026-04-15T13:31:30.267 INFO:teuthology.orchestra.run.vm09.stdout:Reading package lists... 2026-04-15T13:31:30.453 INFO:teuthology.orchestra.run.vm09.stdout:Building dependency tree... 2026-04-15T13:31:30.453 INFO:teuthology.orchestra.run.vm09.stdout:Reading state information... 2026-04-15T13:31:30.575 INFO:teuthology.orchestra.run.vm09.stdout:The following packages were automatically installed and are no longer required: 2026-04-15T13:31:30.576 INFO:teuthology.orchestra.run.vm09.stdout: kpartx libsgutils2-2 sg3-utils sg3-utils-udev 2026-04-15T13:31:30.576 INFO:teuthology.orchestra.run.vm09.stdout:Use 'sudo apt autoremove' to remove them. 2026-04-15T13:31:30.576 INFO:teuthology.orchestra.run.vm09.stdout:The following additional packages will be installed: 2026-04-15T13:31:30.576 INFO:teuthology.orchestra.run.vm09.stdout: wireless-regdb 2026-04-15T13:31:30.618 INFO:teuthology.orchestra.run.vm09.stdout:The following NEW packages will be installed: 2026-04-15T13:31:30.618 INFO:teuthology.orchestra.run.vm09.stdout: linux-modules-extra-5.15.0-171-generic wireless-regdb 2026-04-15T13:31:30.650 INFO:teuthology.orchestra.run.vm09.stdout:0 upgraded, 2 newly installed, 0 to remove and 60 not upgraded. 2026-04-15T13:31:30.698 INFO:teuthology.orchestra.run.vm09.stdout:Need to get 63.9 MB of archives. 2026-04-15T13:31:30.698 INFO:teuthology.orchestra.run.vm09.stdout:After this operation, 353 MB of additional disk space will be used. 
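Note: /scratch_devs is the per-host file teuthology uses to track scratch devices. The bare "sudo dd of=/scratch_devs" rewrites a host's list from stdin (piped input is not echoed in the log); on vm06 it now records the four /dev/nvmeXn1 devices. The companion "dd if=/scratch_devs of=/dev/stdout" on vm09 reads that host's existing list, yielding devs=['/dev/vg_nvme/lv_1', ...] for the same nvme_loop treatment. Each LV is then validated three ways: stat (it must exist; here each is a symlink to its ../dm-N node), a one-sector read with dd, and a negated grep over mount asserting it is mounted nowhere (devtmpfs lines are filtered out so the /dev mount itself cannot produce a false match). A sketch of that check:

    # Sketch of the per-device validation sequence run on vm09 above.
    check_scratch_dev() {  # usage: check_scratch_dev <device-path>
      stat "$1"                                   # device node must exist
      sudo dd if="$1" of=/dev/null count=1        # and be readable (one sector)
      ! mount | grep -v devtmpfs | grep -q "$1"   # and not be mounted anywhere
    }
    for lv in /dev/vg_nvme/lv_{1..4}; do
      check_scratch_dev "$lv"
    done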
2026-04-15T13:31:30.698 INFO:teuthology.orchestra.run.vm09.stdout:Get:1 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 wireless-regdb all 2025.10.07-0ubuntu1~22.04.1 [10.1 kB] 2026-04-15T13:31:30.706 INFO:teuthology.orchestra.run.vm09.stdout:Get:2 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 linux-modules-extra-5.15.0-171-generic amd64 5.15.0-171.181 [63.9 MB] 2026-04-15T13:31:31.446 INFO:teuthology.orchestra.run.vm09.stderr:debconf: unable to initialize frontend: Dialog 2026-04-15T13:31:31.446 INFO:teuthology.orchestra.run.vm09.stderr:debconf: (Dialog frontend will not work on a dumb terminal, an emacs shell buffer, or without a controlling terminal.) 2026-04-15T13:31:31.446 INFO:teuthology.orchestra.run.vm09.stderr:debconf: falling back to frontend: Readline 2026-04-15T13:31:31.451 INFO:teuthology.orchestra.run.vm09.stderr:debconf: unable to initialize frontend: Readline 2026-04-15T13:31:31.451 INFO:teuthology.orchestra.run.vm09.stderr:debconf: (This frontend requires a controlling tty.) 2026-04-15T13:31:31.451 INFO:teuthology.orchestra.run.vm09.stderr:debconf: falling back to frontend: Teletype 2026-04-15T13:31:31.454 INFO:teuthology.orchestra.run.vm09.stderr:dpkg-preconfigure: unable to re-open stdin: 2026-04-15T13:31:31.480 INFO:teuthology.orchestra.run.vm09.stdout:Fetched 63.9 MB in 1s (99.1 MB/s) 2026-04-15T13:31:31.555 INFO:teuthology.orchestra.run.vm09.stdout:Selecting previously unselected package wireless-regdb. 2026-04-15T13:31:31.589 INFO:teuthology.orchestra.run.vm09.stdout:(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 119267 files and directories currently installed.) 2026-04-15T13:31:31.592 INFO:teuthology.orchestra.run.vm09.stdout:Preparing to unpack .../wireless-regdb_2025.10.07-0ubuntu1~22.04.1_all.deb ... 2026-04-15T13:31:31.593 INFO:teuthology.orchestra.run.vm09.stdout:Unpacking wireless-regdb (2025.10.07-0ubuntu1~22.04.1) ... 2026-04-15T13:31:31.613 INFO:teuthology.orchestra.run.vm09.stdout:Selecting previously unselected package linux-modules-extra-5.15.0-171-generic. 2026-04-15T13:31:31.620 INFO:teuthology.orchestra.run.vm09.stdout:Preparing to unpack .../linux-modules-extra-5.15.0-171-generic_5.15.0-171.181_amd64.deb ... 2026-04-15T13:31:31.621 INFO:teuthology.orchestra.run.vm09.stdout:Unpacking linux-modules-extra-5.15.0-171-generic (5.15.0-171.181) ... 2026-04-15T13:31:33.305 INFO:teuthology.orchestra.run.vm09.stdout:Setting up wireless-regdb (2025.10.07-0ubuntu1~22.04.1) ... 2026-04-15T13:31:33.307 INFO:teuthology.orchestra.run.vm09.stdout:Setting up linux-modules-extra-5.15.0-171-generic (5.15.0-171.181) ... 2026-04-15T13:31:34.597 INFO:teuthology.orchestra.run.vm09.stdout:Processing triggers for man-db (2.10.2-1) ... 2026-04-15T13:31:34.634 INFO:teuthology.orchestra.run.vm09.stdout:Processing triggers for linux-image-5.15.0-171-generic (5.15.0-171.181) ... 
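Note: linux-modules-extra-$(uname -r) is installed here because Ubuntu cloud images ship a trimmed kernel module set, and the NVMe target modules (including nvme_loop, needed by the upcoming modprobe) live in the extra-modules package; wireless-regdb comes along only as a dependency. A guarded variant one might use to skip the ~64 MB download when the module is already present (illustrative, not the task's actual logic):

    # Hypothetical guard: install extra modules only if nvme_loop is missing.
    sudo modprobe nvme_loop 2>/dev/null || {
      sudo DEBIAN_FRONTEND=noninteractive \
        apt-get install -y "linux-modules-extra-$(uname -r)"
      sudo modprobe nvme_loop
    }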
2026-04-15T13:31:34.640 INFO:teuthology.orchestra.run.vm09.stdout:/etc/kernel/postinst.d/initramfs-tools: 2026-04-15T13:31:34.640 INFO:teuthology.orchestra.run.vm09.stdout:update-initramfs: Generating /boot/initrd.img-5.15.0-171-generic 2026-04-15T13:31:44.067 INFO:teuthology.orchestra.run.vm09.stdout:/etc/kernel/postinst.d/zz-update-grub: 2026-04-15T13:31:44.067 INFO:teuthology.orchestra.run.vm09.stdout:Sourcing file `/etc/default/grub' 2026-04-15T13:31:44.093 INFO:teuthology.orchestra.run.vm09.stdout:Sourcing file `/etc/default/grub.d/50-cloudimg-settings.cfg' 2026-04-15T13:31:44.094 INFO:teuthology.orchestra.run.vm09.stdout:Sourcing file `/etc/default/grub.d/init-select.cfg' 2026-04-15T13:31:44.095 INFO:teuthology.orchestra.run.vm09.stdout:Generating grub configuration file ... 2026-04-15T13:31:44.180 INFO:teuthology.orchestra.run.vm09.stdout:Found linux image: /boot/vmlinuz-5.15.0-171-generic 2026-04-15T13:31:44.186 INFO:teuthology.orchestra.run.vm09.stdout:Found initrd image: /boot/initrd.img-5.15.0-171-generic 2026-04-15T13:31:44.394 INFO:teuthology.orchestra.run.vm09.stdout:Warning: os-prober will not be executed to detect other bootable partitions. 2026-04-15T13:31:44.394 INFO:teuthology.orchestra.run.vm09.stdout:Systems on them will not be added to the GRUB boot configuration. 2026-04-15T13:31:44.394 INFO:teuthology.orchestra.run.vm09.stdout:Check GRUB_DISABLE_OS_PROBER documentation entry. 2026-04-15T13:31:44.402 INFO:teuthology.orchestra.run.vm09.stdout:done 2026-04-15T13:31:44.662 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:31:44.662 INFO:teuthology.orchestra.run.vm09.stdout:Running kernel seems to be up-to-date. 2026-04-15T13:31:44.662 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:31:44.662 INFO:teuthology.orchestra.run.vm09.stdout:Services to be restarted: 2026-04-15T13:31:44.665 INFO:teuthology.orchestra.run.vm09.stdout: systemctl restart apache-htcacheclean.service 2026-04-15T13:31:44.672 INFO:teuthology.orchestra.run.vm09.stdout: systemctl restart rsyslog.service 2026-04-15T13:31:44.676 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:31:44.676 INFO:teuthology.orchestra.run.vm09.stdout:Service restarts being deferred: 2026-04-15T13:31:44.676 INFO:teuthology.orchestra.run.vm09.stdout: systemctl restart networkd-dispatcher.service 2026-04-15T13:31:44.676 INFO:teuthology.orchestra.run.vm09.stdout: systemctl restart unattended-upgrades.service 2026-04-15T13:31:44.676 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:31:44.676 INFO:teuthology.orchestra.run.vm09.stdout:No containers need to be restarted. 2026-04-15T13:31:44.676 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:31:44.676 INFO:teuthology.orchestra.run.vm09.stdout:No user sessions are running outdated binaries. 2026-04-15T13:31:44.676 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:31:44.676 INFO:teuthology.orchestra.run.vm09.stdout:No VM guests are running outdated hypervisor (qemu) binaries on this host. 2026-04-15T13:31:46.020 DEBUG:teuthology.orchestra.run.vm09:> sudo apt install -y nvme-cli 2026-04-15T13:31:46.071 INFO:teuthology.orchestra.run.vm09.stderr: 2026-04-15T13:31:46.071 INFO:teuthology.orchestra.run.vm09.stderr:WARNING: apt does not have a stable CLI interface. Use with caution in scripts. 2026-04-15T13:31:46.071 INFO:teuthology.orchestra.run.vm09.stderr: 2026-04-15T13:31:46.099 INFO:teuthology.orchestra.run.vm09.stdout:Reading package lists... 2026-04-15T13:31:46.317 INFO:teuthology.orchestra.run.vm09.stdout:Building dependency tree... 
2026-04-15T13:31:46.318 INFO:teuthology.orchestra.run.vm09.stdout:Reading state information... 2026-04-15T13:31:46.546 INFO:teuthology.orchestra.run.vm09.stdout:The following packages were automatically installed and are no longer required: 2026-04-15T13:31:46.547 INFO:teuthology.orchestra.run.vm09.stdout: kpartx libsgutils2-2 sg3-utils sg3-utils-udev 2026-04-15T13:31:46.547 INFO:teuthology.orchestra.run.vm09.stdout:Use 'sudo apt autoremove' to remove them. 2026-04-15T13:31:46.595 INFO:teuthology.orchestra.run.vm09.stdout:The following NEW packages will be installed: 2026-04-15T13:31:46.595 INFO:teuthology.orchestra.run.vm09.stdout: nvme-cli 2026-04-15T13:31:46.634 INFO:teuthology.orchestra.run.vm09.stdout:0 upgraded, 1 newly installed, 0 to remove and 60 not upgraded. 2026-04-15T13:31:46.686 INFO:teuthology.orchestra.run.vm09.stdout:Need to get 474 kB of archives. 2026-04-15T13:31:46.686 INFO:teuthology.orchestra.run.vm09.stdout:After this operation, 1136 kB of additional disk space will be used. 2026-04-15T13:31:46.686 INFO:teuthology.orchestra.run.vm09.stdout:Get:1 http://archive.ubuntu.com/ubuntu jammy-updates/main amd64 nvme-cli amd64 1.16-3ubuntu0.3 [474 kB] 2026-04-15T13:31:46.881 INFO:teuthology.orchestra.run.vm09.stderr:debconf: unable to initialize frontend: Dialog 2026-04-15T13:31:46.881 INFO:teuthology.orchestra.run.vm09.stderr:debconf: (Dialog frontend will not work on a dumb terminal, an emacs shell buffer, or without a controlling terminal.) 2026-04-15T13:31:46.881 INFO:teuthology.orchestra.run.vm09.stderr:debconf: falling back to frontend: Readline 2026-04-15T13:31:46.887 INFO:teuthology.orchestra.run.vm09.stderr:debconf: unable to initialize frontend: Readline 2026-04-15T13:31:46.887 INFO:teuthology.orchestra.run.vm09.stderr:debconf: (This frontend requires a controlling tty.) 2026-04-15T13:31:46.887 INFO:teuthology.orchestra.run.vm09.stderr:debconf: falling back to frontend: Teletype 2026-04-15T13:31:46.890 INFO:teuthology.orchestra.run.vm09.stderr:dpkg-preconfigure: unable to re-open stdin: 2026-04-15T13:31:46.927 INFO:teuthology.orchestra.run.vm09.stdout:Fetched 474 kB in 0s (7857 kB/s) 2026-04-15T13:31:46.943 INFO:teuthology.orchestra.run.vm09.stdout:Selecting previously unselected package nvme-cli. 2026-04-15T13:31:46.977 INFO:teuthology.orchestra.run.vm09.stdout:(Reading database ... (Reading database ... 5% (Reading database ... 10% (Reading database ... 15% (Reading database ... 20% (Reading database ... 25% (Reading database ... 30% (Reading database ... 35% (Reading database ... 40% (Reading database ... 45% (Reading database ... 50% (Reading database ... 55% (Reading database ... 60% (Reading database ... 65% (Reading database ... 70% (Reading database ... 75% (Reading database ... 80% (Reading database ... 85% (Reading database ... 90% (Reading database ... 95% (Reading database ... 100% (Reading database ... 125173 files and directories currently installed.) 2026-04-15T13:31:46.979 INFO:teuthology.orchestra.run.vm09.stdout:Preparing to unpack .../nvme-cli_1.16-3ubuntu0.3_amd64.deb ... 2026-04-15T13:31:46.980 INFO:teuthology.orchestra.run.vm09.stdout:Unpacking nvme-cli (1.16-3ubuntu0.3) ... 2026-04-15T13:31:47.044 INFO:teuthology.orchestra.run.vm09.stdout:Setting up nvme-cli (1.16-3ubuntu0.3) ... 2026-04-15T13:31:47.111 INFO:teuthology.orchestra.run.vm09.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmefc-boot-connections.service → /lib/systemd/system/nvmefc-boot-connections.service. 
2026-04-15T13:31:47.364 INFO:teuthology.orchestra.run.vm09.stdout:Created symlink /etc/systemd/system/default.target.wants/nvmf-autoconnect.service → /lib/systemd/system/nvmf-autoconnect.service. 2026-04-15T13:31:47.731 INFO:teuthology.orchestra.run.vm09.stdout:nvmf-connect.target is a disabled or a static unit, not starting it. 2026-04-15T13:31:47.751 INFO:teuthology.orchestra.run.vm09.stdout:Processing triggers for man-db (2.10.2-1) ... 2026-04-15T13:31:48.069 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:31:48.069 INFO:teuthology.orchestra.run.vm09.stdout:Running kernel seems to be up-to-date. 2026-04-15T13:31:48.069 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:31:48.069 INFO:teuthology.orchestra.run.vm09.stdout:Services to be restarted: 2026-04-15T13:31:48.073 INFO:teuthology.orchestra.run.vm09.stdout: systemctl restart apache-htcacheclean.service 2026-04-15T13:31:48.080 INFO:teuthology.orchestra.run.vm09.stdout: systemctl restart rsyslog.service 2026-04-15T13:31:48.083 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:31:48.083 INFO:teuthology.orchestra.run.vm09.stdout:Service restarts being deferred: 2026-04-15T13:31:48.084 INFO:teuthology.orchestra.run.vm09.stdout: systemctl restart networkd-dispatcher.service 2026-04-15T13:31:48.084 INFO:teuthology.orchestra.run.vm09.stdout: systemctl restart unattended-upgrades.service 2026-04-15T13:31:48.084 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:31:48.084 INFO:teuthology.orchestra.run.vm09.stdout:No containers need to be restarted. 2026-04-15T13:31:48.084 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:31:48.084 INFO:teuthology.orchestra.run.vm09.stdout:No user sessions are running outdated binaries. 2026-04-15T13:31:48.084 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:31:48.084 INFO:teuthology.orchestra.run.vm09.stdout:No VM guests are running outdated hypervisor (qemu) binaries on this host. 2026-04-15T13:31:49.346 DEBUG:teuthology.orchestra.run.vm09:> grep '^nvme_loop' /proc/modules || sudo modprobe nvme_loop && sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn && sudo mkdir -p /sys/kernel/config/nvmet/ports/1 && echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype 2026-04-15T13:31:49.420 INFO:teuthology.orchestra.run.vm09.stdout:loop 2026-04-15T13:31:49.421 INFO:tasks.nvme_loop:Connecting nvme_loop vm09:/dev/vg_nvme/lv_1... 2026-04-15T13:31:49.421 DEBUG:teuthology.orchestra.run.vm09:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1 && echo -n /dev/vg_nvme/lv_1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_1/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_1 /sys/kernel/config/nvmet/ports/1/subsystems/lv_1 && sudo nvme connect -t loop -n lv_1 -q hostnqn 2026-04-15T13:31:49.476 INFO:teuthology.orchestra.run.vm09.stdout:1 2026-04-15T13:31:49.492 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vg_nvme/lv_11 2026-04-15T13:31:49.508 INFO:tasks.nvme_loop:Connecting nvme_loop vm09:/dev/vg_nvme/lv_2... 
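Note: the load-guard that opens the nvmet setup ("grep '^nvme_loop' /proc/modules || sudo modprobe nvme_loop && ...") relies on shell precedence: && and || bind equally and associate left, so the command groups as (grep || modprobe) && configfs-setup, and the port setup runs whether the module was already loaded or had to be modprobed. On both hosts the only stdout is "loop", which is tee echoing addr_trtype; grep printed no /proc/modules line, so the module was in fact loaded by modprobe. The same expression with the implicit grouping made explicit:

    # Equivalent to the guard in the log, grouping spelled out.
    { grep '^nvme_loop' /proc/modules || sudo modprobe nvme_loop; } \
      && sudo mkdir -p /sys/kernel/config/nvmet/hosts/hostnqn \
      && sudo mkdir -p /sys/kernel/config/nvmet/ports/1 \
      && echo loop | sudo tee /sys/kernel/config/nvmet/ports/1/addr_trtype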
2026-04-15T13:31:49.508 DEBUG:teuthology.orchestra.run.vm09:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_2 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1 && echo -n /dev/vg_nvme/lv_2 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_2/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_2 /sys/kernel/config/nvmet/ports/1/subsystems/lv_2 && sudo nvme connect -t loop -n lv_2 -q hostnqn 2026-04-15T13:31:49.562 INFO:teuthology.orchestra.run.vm09.stdout:1 2026-04-15T13:31:49.581 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vg_nvme/lv_21 2026-04-15T13:31:49.594 INFO:tasks.nvme_loop:Connecting nvme_loop vm09:/dev/vg_nvme/lv_3... 2026-04-15T13:31:49.594 DEBUG:teuthology.orchestra.run.vm09:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_3 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1 && echo -n /dev/vg_nvme/lv_3 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_3/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_3 /sys/kernel/config/nvmet/ports/1/subsystems/lv_3 && sudo nvme connect -t loop -n lv_3 -q hostnqn 2026-04-15T13:31:49.648 INFO:teuthology.orchestra.run.vm09.stdout:1 2026-04-15T13:31:49.664 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vg_nvme/lv_31 2026-04-15T13:31:49.678 INFO:tasks.nvme_loop:Connecting nvme_loop vm09:/dev/vg_nvme/lv_4... 2026-04-15T13:31:49.678 DEBUG:teuthology.orchestra.run.vm09:> sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_4 && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/attr_allow_any_host && sudo mkdir -p /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1 && echo -n /dev/vg_nvme/lv_4 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1/device_path && echo 1 | sudo tee /sys/kernel/config/nvmet/subsystems/lv_4/namespaces/1/enable && sudo ln -s /sys/kernel/config/nvmet/subsystems/lv_4 /sys/kernel/config/nvmet/ports/1/subsystems/lv_4 && sudo nvme connect -t loop -n lv_4 -q hostnqn 2026-04-15T13:31:49.736 INFO:teuthology.orchestra.run.vm09.stdout:1 2026-04-15T13:31:49.751 INFO:teuthology.orchestra.run.vm09.stdout:/dev/vg_nvme/lv_41 2026-04-15T13:31:49.764 DEBUG:teuthology.orchestra.run.vm09:> lsblk 2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS 2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:loop0 7:0 0 91.6M 1 loop /snap/lxd/37982 2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:loop1 7:1 0 63.8M 1 loop /snap/core20/2717 2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:loop2 7:2 0 48.1M 1 loop /snap/snapd/25935 2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:sr0 11:0 1 366K 0 rom 2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:vda 252:0 0 40G 0 disk 2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:├─vda1 252:1 0 39.9G 0 part / 2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:├─vda14 252:14 0 4M 0 part 2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:└─vda15 252:15 0 106M 0 part /boot/efi 2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:vdb 252:16 0 20G 0 disk 
2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:└─vg_nvme-lv_1 253:0 0 20G 0 lvm 2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:vdc 252:32 0 20G 0 disk 2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:└─vg_nvme-lv_2 253:1 0 20G 0 lvm 2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:vdd 252:48 0 20G 0 disk 2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:└─vg_nvme-lv_3 253:2 0 20G 0 lvm 2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:vde 252:64 0 20G 0 disk 2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:└─vg_nvme-lv_4 253:3 0 20G 0 lvm 2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:nvme0n1 259:1 0 20G 0 disk 2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:nvme1n1 259:3 0 20G 0 disk 2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:nvme2n1 259:5 0 20G 0 disk 2026-04-15T13:31:49.814 INFO:teuthology.orchestra.run.vm09.stdout:nvme3n1 259:7 0 20G 0 disk 2026-04-15T13:31:49.815 DEBUG:teuthology.orchestra.run.vm09:> sudo nvme list -o json 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout:{ 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: "Devices" : [ 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: { 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: "NameSpace" : 1, 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: "DevicePath" : "/dev/nvme0n1", 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: "Firmware" : "5.15.0-1", 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: "Index" : 0, 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: "ModelNumber" : "Linux", 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: "SerialNumber" : "b804cfa419b2e6ee38b2", 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: "UsedBytes" : 21470642176, 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: "MaximumLBA" : 41934848, 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: "PhysicalSize" : 21470642176, 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: "SectorSize" : 512 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: }, 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: { 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: "NameSpace" : 1, 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: "DevicePath" : "/dev/nvme1n1", 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: "Firmware" : "5.15.0-1", 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: "Index" : 1, 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: "ModelNumber" : "Linux", 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: "SerialNumber" : "d32cd802138d477b0c27", 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: "UsedBytes" : 21470642176, 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: "MaximumLBA" : 41934848, 2026-04-15T13:31:49.864 INFO:teuthology.orchestra.run.vm09.stdout: "PhysicalSize" : 21470642176, 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "SectorSize" : 512 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: }, 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: { 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "NameSpace" : 1, 
2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "DevicePath" : "/dev/nvme2n1", 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "Firmware" : "5.15.0-1", 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "Index" : 2, 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "ModelNumber" : "Linux", 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "SerialNumber" : "1b2c890b6b49bb6c8abc", 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "UsedBytes" : 21470642176, 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "MaximumLBA" : 41934848, 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "PhysicalSize" : 21470642176, 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "SectorSize" : 512 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: }, 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: { 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "NameSpace" : 1, 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "DevicePath" : "/dev/nvme3n1", 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "Firmware" : "5.15.0-1", 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "Index" : 3, 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "ModelNumber" : "Linux", 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "SerialNumber" : "e1183b90f8221b5791e4", 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "UsedBytes" : 21470642176, 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "MaximumLBA" : 41934848, 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "PhysicalSize" : 21470642176, 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: "SectorSize" : 512 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: } 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout: ] 2026-04-15T13:31:49.865 INFO:teuthology.orchestra.run.vm09.stdout:} 2026-04-15T13:31:49.865 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=0 bs=1 count=4096 2026-04-15T13:31:49.920 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-04-15T13:31:49.920 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-04-15T13:31:49.920 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00460294 s, 890 kB/s 2026-04-15T13:31:49.921 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s0 /dev/nvme0n1 2026-04-15T13:31:49.973 INFO:teuthology.orchestra.run.vm09.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:49.973 INFO:teuthology.orchestra.run.vm09.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:49.973 INFO:teuthology.orchestra.run.vm09.stdout:00000016 2026-04-15T13:31:49.974 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=1073741824 bs=1 count=4096 2026-04-15T13:31:50.028 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-04-15T13:31:50.028 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-04-15T13:31:50.028 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00482953 s, 848 kB/s 2026-04-15T13:31:50.029 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s1073741824 /dev/nvme0n1 2026-04-15T13:31:50.080 INFO:teuthology.orchestra.run.vm09.stdout:40000000 
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:50.080 INFO:teuthology.orchestra.run.vm09.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:50.080 INFO:teuthology.orchestra.run.vm09.stdout:40000016 2026-04-15T13:31:50.081 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme0n1 seek=10737418240 bs=1 count=4096 2026-04-15T13:31:50.136 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-04-15T13:31:50.136 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-04-15T13:31:50.137 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00473501 s, 865 kB/s 2026-04-15T13:31:50.137 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s10737418240 /dev/nvme0n1 2026-04-15T13:31:50.189 INFO:teuthology.orchestra.run.vm09.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:50.189 INFO:teuthology.orchestra.run.vm09.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:50.189 INFO:teuthology.orchestra.run.vm09.stdout:280000016 2026-04-15T13:31:50.190 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=0 bs=1 count=4096 2026-04-15T13:31:50.250 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-04-15T13:31:50.250 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-04-15T13:31:50.250 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00462022 s, 887 kB/s 2026-04-15T13:31:50.251 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s0 /dev/nvme1n1 2026-04-15T13:31:50.299 INFO:teuthology.orchestra.run.vm09.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:50.299 INFO:teuthology.orchestra.run.vm09.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:50.299 INFO:teuthology.orchestra.run.vm09.stdout:00000016 2026-04-15T13:31:50.300 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=1073741824 bs=1 count=4096 2026-04-15T13:31:50.353 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-04-15T13:31:50.353 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-04-15T13:31:50.353 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00493631 s, 830 kB/s 2026-04-15T13:31:50.354 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s1073741824 /dev/nvme1n1 2026-04-15T13:31:50.408 INFO:teuthology.orchestra.run.vm09.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:50.409 INFO:teuthology.orchestra.run.vm09.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:50.409 INFO:teuthology.orchestra.run.vm09.stdout:40000016 2026-04-15T13:31:50.409 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme1n1 seek=10737418240 bs=1 count=4096 2026-04-15T13:31:50.464 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-04-15T13:31:50.464 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-04-15T13:31:50.464 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00504907 s, 811 kB/s 2026-04-15T13:31:50.465 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s10737418240 /dev/nvme1n1 2026-04-15T13:31:50.515 INFO:teuthology.orchestra.run.vm09.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:50.515 INFO:teuthology.orchestra.run.vm09.stdout:280000010 00 00 
00 00 00 00 |......| 2026-04-15T13:31:50.515 INFO:teuthology.orchestra.run.vm09.stdout:280000016 2026-04-15T13:31:50.516 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=0 bs=1 count=4096 2026-04-15T13:31:50.568 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-04-15T13:31:50.568 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-04-15T13:31:50.568 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00466897 s, 877 kB/s 2026-04-15T13:31:50.569 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s0 /dev/nvme2n1 2026-04-15T13:31:50.620 INFO:teuthology.orchestra.run.vm09.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:50.620 INFO:teuthology.orchestra.run.vm09.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:50.620 INFO:teuthology.orchestra.run.vm09.stdout:00000016 2026-04-15T13:31:50.620 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=1073741824 bs=1 count=4096 2026-04-15T13:31:50.671 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-04-15T13:31:50.672 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-04-15T13:31:50.672 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00474581 s, 863 kB/s 2026-04-15T13:31:50.672 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s1073741824 /dev/nvme2n1 2026-04-15T13:31:50.719 INFO:teuthology.orchestra.run.vm09.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:50.719 INFO:teuthology.orchestra.run.vm09.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:50.719 INFO:teuthology.orchestra.run.vm09.stdout:40000016 2026-04-15T13:31:50.719 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme2n1 seek=10737418240 bs=1 count=4096 2026-04-15T13:31:50.772 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-04-15T13:31:50.772 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-04-15T13:31:50.773 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00532701 s, 769 kB/s 2026-04-15T13:31:50.773 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s10737418240 /dev/nvme2n1 2026-04-15T13:31:50.823 INFO:teuthology.orchestra.run.vm09.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:50.823 INFO:teuthology.orchestra.run.vm09.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:50.823 INFO:teuthology.orchestra.run.vm09.stdout:280000016 2026-04-15T13:31:50.824 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=0 bs=1 count=4096 2026-04-15T13:31:50.879 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-04-15T13:31:50.879 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-04-15T13:31:50.879 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.0039657 s, 1.0 MB/s 2026-04-15T13:31:50.879 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s0 /dev/nvme3n1 2026-04-15T13:31:50.926 INFO:teuthology.orchestra.run.vm09.stdout:00000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:50.927 INFO:teuthology.orchestra.run.vm09.stdout:00000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:50.927 INFO:teuthology.orchestra.run.vm09.stdout:00000016 2026-04-15T13:31:50.927 DEBUG:teuthology.orchestra.run.vm09:> sudo dd 
if=/dev/zero of=/dev/nvme3n1 seek=1073741824 bs=1 count=4096 2026-04-15T13:31:50.979 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-04-15T13:31:50.979 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-04-15T13:31:50.979 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00384727 s, 1.1 MB/s 2026-04-15T13:31:50.980 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s1073741824 /dev/nvme3n1 2026-04-15T13:31:51.026 INFO:teuthology.orchestra.run.vm09.stdout:40000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:51.027 INFO:teuthology.orchestra.run.vm09.stdout:40000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:51.027 INFO:teuthology.orchestra.run.vm09.stdout:40000016 2026-04-15T13:31:51.027 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/zero of=/dev/nvme3n1 seek=10737418240 bs=1 count=4096 2026-04-15T13:31:51.080 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records in 2026-04-15T13:31:51.080 INFO:teuthology.orchestra.run.vm09.stderr:4096+0 records out 2026-04-15T13:31:51.080 INFO:teuthology.orchestra.run.vm09.stderr:4096 bytes (4.1 kB, 4.0 KiB) copied, 0.00378602 s, 1.1 MB/s 2026-04-15T13:31:51.081 DEBUG:teuthology.orchestra.run.vm09:> sudo hexdump -n22 -C -s10737418240 /dev/nvme3n1 2026-04-15T13:31:51.134 INFO:teuthology.orchestra.run.vm09.stdout:280000000 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| 2026-04-15T13:31:51.134 INFO:teuthology.orchestra.run.vm09.stdout:280000010 00 00 00 00 00 00 |......| 2026-04-15T13:31:51.134 INFO:teuthology.orchestra.run.vm09.stdout:280000016 2026-04-15T13:31:51.135 INFO:tasks.nvme_loop:new_devs ['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1'] 2026-04-15T13:31:51.135 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-04-15T13:31:51.135 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/scratch_devs 2026-04-15T13:31:51.189 INFO:teuthology.run_tasks:Running task cephadm... 2026-04-15T13:31:51.246 INFO:tasks.cephadm:Config: {'roleless': True, 'conf': {'mgr': {'debug mgr': 20, 'debug ms': 1}, 'mon': {'debug mon': 20, 'debug ms': 1, 'debug paxos': 20}, 'osd': {'debug ms': 1, 'debug osd': 20, 'osd mclock iops capacity threshold hdd': 49000, 'osd shutdown pgref assert': True}}, 'flavor': 'default', 'log-ignorelist': ['\\(MDS_ALL_DOWN\\)', '\\(MDS_UP_LESS_THAN_MAX\\)', 'CEPHADM_DAEMON_PLACE_FAIL', 'CEPHADM_FAILED_DAEMON'], 'log-only-match': ['CEPHADM_'], 'sha1': '187293b0588135c3607a12257332b6880af4eff9', 'cephadm_binary_url': 'https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm', 'containers': {'image': 'harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5'}} 2026-04-15T13:31:51.246 INFO:tasks.cephadm:Provided image contains tag or digest, using it as is 2026-04-15T13:31:51.246 INFO:tasks.cephadm:Cluster image is harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 2026-04-15T13:31:51.246 INFO:tasks.cephadm:Cluster fsid is 75e42418-38cf-11f1-9300-4fe77ac4445b 2026-04-15T13:31:51.246 INFO:tasks.cephadm:Choosing monitor IPs and ports... 2026-04-15T13:31:51.246 INFO:tasks.cephadm:No mon roles; fabricating mons 2026-04-15T13:31:51.246 INFO:tasks.cephadm:Monitor IPs: {'mon.vm06': '192.168.123.106', 'mon.vm09': '192.168.123.109'} 2026-04-15T13:31:51.246 INFO:tasks.cephadm:Normalizing hostnames... 
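The nvme_loop task above zeroes the first 4 KiB at three offsets of each loop-backed namespace (device start, 1 GiB, 10 GiB) and reads the bytes back with hexdump to confirm the scratch devices are writable and blank. A minimal bash sketch of that pattern, with device list and offsets taken from the log (this is an illustration, not the teuthology helper itself):

    #!/usr/bin/env bash
    # Zero-and-verify scratch devices as shown in the log above.
    set -ex
    devs="/dev/nvme0n1 /dev/nvme1n1 /dev/nvme2n1 /dev/nvme3n1"
    offsets="0 1073741824 10737418240"   # start, 1 GiB, 10 GiB (bytes)
    for dev in $devs; do
      for off in $offsets; do
        sudo dd if=/dev/zero of="$dev" seek="$off" bs=1 count=4096
        # read back 22 bytes at the same offset; expect all zeroes
        sudo hexdump -n22 -C -s"$off" "$dev"
      done
    done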
2026-04-15T13:31:51.246 DEBUG:teuthology.orchestra.run.vm06:> sudo hostname $(hostname -s) 2026-04-15T13:31:51.255 DEBUG:teuthology.orchestra.run.vm09:> sudo hostname $(hostname -s) 2026-04-15T13:31:51.265 INFO:tasks.cephadm:Downloading cephadm from url: https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm 2026-04-15T13:31:51.265 DEBUG:teuthology.orchestra.run.vm06:> curl --silent -L https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-04-15T13:31:52.463 INFO:teuthology.orchestra.run.vm06.stdout:-rw-rw-r-- 1 ubuntu ubuntu 1036391 Apr 15 13:31 /home/ubuntu/cephtest/cephadm 2026-04-15T13:31:52.463 DEBUG:teuthology.orchestra.run.vm09:> curl --silent -L https://download.ceph.com/rpm-20.2.0/el9/noarch/cephadm > /home/ubuntu/cephtest/cephadm && ls -l /home/ubuntu/cephtest/cephadm 2026-04-15T13:31:53.693 INFO:teuthology.orchestra.run.vm09.stdout:-rw-rw-r-- 1 ubuntu ubuntu 1036391 Apr 15 13:31 /home/ubuntu/cephtest/cephadm 2026-04-15T13:31:53.694 DEBUG:teuthology.orchestra.run.vm06:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-04-15T13:31:53.698 DEBUG:teuthology.orchestra.run.vm09:> test -s /home/ubuntu/cephtest/cephadm && test $(stat -c%s /home/ubuntu/cephtest/cephadm) -gt 1000 && chmod +x /home/ubuntu/cephtest/cephadm 2026-04-15T13:31:53.706 INFO:tasks.cephadm:Pulling image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 on all hosts... 2026-04-15T13:31:53.706 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 pull 2026-04-15T13:31:53.744 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 pull 2026-04-15T13:31:54.020 INFO:teuthology.orchestra.run.vm06.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5... 2026-04-15T13:31:54.045 INFO:teuthology.orchestra.run.vm09.stderr:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5... 
2026-04-15T13:32:26.705 INFO:teuthology.orchestra.run.vm06.stdout:{ 2026-04-15T13:32:26.706 INFO:teuthology.orchestra.run.vm06.stdout: "ceph_version": "ceph version 20.2.0-19-g7ec4401a095 (7ec4401a095f03c389fcf6df60e966f86395fb86) tentacle (stable)", 2026-04-15T13:32:26.706 INFO:teuthology.orchestra.run.vm06.stdout: "image_id": "b4cb326006c035fcaccf517a7733ba26fcc96dafbf1f00ae8ac89d843a9451a9", 2026-04-15T13:32:26.706 INFO:teuthology.orchestra.run.vm06.stdout: "repo_digests": [ 2026-04-15T13:32:26.706 INFO:teuthology.orchestra.run.vm06.stdout: "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:b4cb326006c035fcaccf517a7733ba26fcc96dafbf1f00ae8ac89d843a9451a9" 2026-04-15T13:32:26.706 INFO:teuthology.orchestra.run.vm06.stdout: ] 2026-04-15T13:32:26.706 INFO:teuthology.orchestra.run.vm06.stdout:} 2026-04-15T13:32:37.609 INFO:teuthology.orchestra.run.vm09.stdout:{ 2026-04-15T13:32:37.609 INFO:teuthology.orchestra.run.vm09.stdout: "ceph_version": "ceph version 20.2.0-19-g7ec4401a095 (7ec4401a095f03c389fcf6df60e966f86395fb86) tentacle (stable)", 2026-04-15T13:32:37.609 INFO:teuthology.orchestra.run.vm09.stdout: "image_id": "b4cb326006c035fcaccf517a7733ba26fcc96dafbf1f00ae8ac89d843a9451a9", 2026-04-15T13:32:37.609 INFO:teuthology.orchestra.run.vm09.stdout: "repo_digests": [ 2026-04-15T13:32:37.609 INFO:teuthology.orchestra.run.vm09.stdout: "harbor.clyso.com/custom-ceph/ceph/ceph@sha256:b4cb326006c035fcaccf517a7733ba26fcc96dafbf1f00ae8ac89d843a9451a9" 2026-04-15T13:32:37.609 INFO:teuthology.orchestra.run.vm09.stdout: ] 2026-04-15T13:32:37.609 INFO:teuthology.orchestra.run.vm09.stdout:} 2026-04-15T13:32:37.629 DEBUG:teuthology.orchestra.run.vm06:> sudo mkdir -p /etc/ceph 2026-04-15T13:32:37.639 DEBUG:teuthology.orchestra.run.vm09:> sudo mkdir -p /etc/ceph 2026-04-15T13:32:37.648 DEBUG:teuthology.orchestra.run.vm06:> sudo chmod 777 /etc/ceph 2026-04-15T13:32:37.691 DEBUG:teuthology.orchestra.run.vm09:> sudo chmod 777 /etc/ceph 2026-04-15T13:32:37.700 INFO:tasks.cephadm:Writing seed config... 
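Both hosts report the same image_id and repo digest for the pull above, which is what guarantees the two nodes run identical bits. A sketch of pulling by tag and then resolving the immutable digest to pin against (docker CLI, as used on these hosts; the inspect format string is a standard Go template, shown here as an illustration):

    # Pull by tag, then resolve the name@sha256:... form seen in the log.
    img=harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5
    sudo docker pull "$img"
    sudo docker inspect --format '{{index .RepoDigests 0}}' "$img"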
2026-04-15T13:32:37.701 INFO:tasks.cephadm: override: [mgr] debug mgr = 20
2026-04-15T13:32:37.701 INFO:tasks.cephadm: override: [mgr] debug ms = 1
2026-04-15T13:32:37.701 INFO:tasks.cephadm: override: [mon] debug mon = 20
2026-04-15T13:32:37.701 INFO:tasks.cephadm: override: [mon] debug ms = 1
2026-04-15T13:32:37.701 INFO:tasks.cephadm: override: [mon] debug paxos = 20
2026-04-15T13:32:37.701 INFO:tasks.cephadm: override: [osd] debug ms = 1
2026-04-15T13:32:37.701 INFO:tasks.cephadm: override: [osd] debug osd = 20
2026-04-15T13:32:37.701 INFO:tasks.cephadm: override: [osd] osd mclock iops capacity threshold hdd = 49000
2026-04-15T13:32:37.701 INFO:tasks.cephadm: override: [osd] osd shutdown pgref assert = True
2026-04-15T13:32:37.701 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-04-15T13:32:37.701 DEBUG:teuthology.orchestra.run.vm06:> dd of=/home/ubuntu/cephtest/seed.ceph.conf
2026-04-15T13:32:37.733 DEBUG:tasks.cephadm:Final config:
[global]
# make logging friendly to teuthology
log_to_file = true
log_to_stderr = false
log to journald = false
mon cluster log to file = true
mon cluster log file level = debug
mon clock drift allowed = 1.000
# replicate across OSDs, not hosts
osd crush chooseleaf type = 0
#osd pool default size = 2
osd pool default erasure code profile = plugin=isa technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
# enable some debugging
auth debug = true
ms die on old message = true
ms die on bug = true
debug asserts on shutdown = true
# adjust warnings
mon max pg per osd = 10000  # >= luminous
mon pg warn max object skew = 0
mon osd allow primary affinity = true
mon osd allow pg remap = true
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false
# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off
# tests delete pools
mon allow pool delete = true
fsid = 75e42418-38cf-11f1-9300-4fe77ac4445b
[osd]
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd mclock profile = high_recovery_ops
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd deep scrub update digest min age = 30
osd map max advance = 10
osd memory target autotune = true
# debugging
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd debug pg log writeout = true
osd debug verify cached snaps = true
osd debug verify missing on start = true
osd debug misdirected ops = true
osd op queue = debug_random
osd op queue cut off = debug_random
osd shutdown pgref assert = True
bdev debug aio = true
osd sloppy crc = true
debug ms = 1
debug osd = 20
osd mclock iops capacity threshold hdd = 49000
[mgr]
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
mgr/telemetry/nag = false
debug mgr = 20
debug ms = 1
[mon]
mon data avail warn = 5
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10
# rotate auth tickets quickly to exercise renewal paths
auth mon ticket ttl = 660  # 11m
auth service ticket ttl = 240  # 4m
# don't complain about global id reclaim
mon_warn_on_insecure_global_id_reclaim = false
mon_warn_on_insecure_global_id_reclaim_allowed = false
debug mon = 20
debug ms = 1
debug paxos = 20
[client.rgw]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
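The seed config is streamed to the remote file with a bare `dd of=...` reading stdin (the `set -ex` / `dd` entries above); the Final config is that seed merged with teuthology's test defaults. Locally the same write is a heredoc. A minimal sketch carrying only the override-derived keys, with the path from the log:

    # Write the seed config the way the harness does: dd copies stdin until EOF.
    dd of=/home/ubuntu/cephtest/seed.ceph.conf <<'EOF'
    [mgr]
    debug mgr = 20
    debug ms = 1
    [mon]
    debug mon = 20
    debug ms = 1
    debug paxos = 20
    [osd]
    debug ms = 1
    debug osd = 20
    osd mclock iops capacity threshold hdd = 49000
    osd shutdown pgref assert = True
    EOF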
2026-04-15T13:32:37.733 DEBUG:teuthology.orchestra.run.vm06:mon.vm06> sudo journalctl -f -n 0 -u ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@mon.vm06.service
2026-04-15T13:32:37.775 INFO:tasks.cephadm:Bootstrapping...
2026-04-15T13:32:37.775 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 -v bootstrap --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b --config /home/ubuntu/cephtest/seed.ceph.conf --output-config /etc/ceph/ceph.conf --output-keyring /etc/ceph/ceph.client.admin.keyring --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub --mon-ip 192.168.123.106 --skip-admin-label && sudo chmod +r /etc/ceph/ceph.client.admin.keyring
2026-04-15T13:32:38.083 INFO:teuthology.orchestra.run.vm06.stdout:--------------------------------------------------------------------------------
2026-04-15T13:32:38.083 INFO:teuthology.orchestra.run.vm06.stdout:cephadm ['--image', 'harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5', '-v', 'bootstrap', '--fsid', '75e42418-38cf-11f1-9300-4fe77ac4445b', '--config', '/home/ubuntu/cephtest/seed.ceph.conf', '--output-config', '/etc/ceph/ceph.conf', '--output-keyring', '/etc/ceph/ceph.client.admin.keyring', '--output-pub-ssh-key', '/home/ubuntu/cephtest/ceph.pub', '--mon-ip', '192.168.123.106', '--skip-admin-label']
2026-04-15T13:32:38.084 INFO:teuthology.orchestra.run.vm06.stderr:Specifying an fsid for your cluster offers no advantages and may increase the likelihood of fsid conflicts.
2026-04-15T13:32:38.084 INFO:teuthology.orchestra.run.vm06.stdout:Verifying podman|docker is present...
2026-04-15T13:32:38.084 INFO:teuthology.orchestra.run.vm06.stdout:Verifying lvm2 is present...
2026-04-15T13:32:38.084 INFO:teuthology.orchestra.run.vm06.stdout:Verifying time synchronization is in place...
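Bootstrap is driven entirely from the downloaded standalone cephadm binary; mon, mgr and keyrings all come out of the container image. Restated with line continuations, the exact invocation from the log (the trailing chmod is the harness making the admin keyring readable for later test steps):

    sudo /home/ubuntu/cephtest/cephadm \
      --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 \
      -v bootstrap \
      --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b \
      --config /home/ubuntu/cephtest/seed.ceph.conf \
      --output-config /etc/ceph/ceph.conf \
      --output-keyring /etc/ceph/ceph.client.admin.keyring \
      --output-pub-ssh-key /home/ubuntu/cephtest/ceph.pub \
      --mon-ip 192.168.123.106 \
      --skip-admin-label \
    && sudo chmod +r /etc/ceph/ceph.client.admin.keyring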
2026-04-15T13:32:38.087 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service
2026-04-15T13:32:38.087 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory
2026-04-15T13:32:38.089 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 3 from systemctl is-active chrony.service
2026-04-15T13:32:38.089 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout inactive
2026-04-15T13:32:38.092 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 1 from systemctl is-enabled chronyd.service
2026-04-15T13:32:38.092 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Failed to get unit file state for chronyd.service: No such file or directory
2026-04-15T13:32:38.094 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 3 from systemctl is-active chronyd.service
2026-04-15T13:32:38.094 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout inactive
2026-04-15T13:32:38.096 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 1 from systemctl is-enabled systemd-timesyncd.service
2026-04-15T13:32:38.096 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout masked
2026-04-15T13:32:38.099 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 3 from systemctl is-active systemd-timesyncd.service
2026-04-15T13:32:38.099 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout inactive
2026-04-15T13:32:38.101 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 1 from systemctl is-enabled ntpd.service
2026-04-15T13:32:38.101 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Failed to get unit file state for ntpd.service: No such file or directory
2026-04-15T13:32:38.104 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 3 from systemctl is-active ntpd.service
2026-04-15T13:32:38.104 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout inactive
2026-04-15T13:32:38.107 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout enabled
2026-04-15T13:32:38.110 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout active
2026-04-15T13:32:38.110 INFO:teuthology.orchestra.run.vm06.stdout:Unit ntp.service is enabled and running
2026-04-15T13:32:38.110 INFO:teuthology.orchestra.run.vm06.stdout:Repeating the final host check...
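The "Non-zero exit code" lines above are expected probes, not failures: cephadm walks a list of candidate time-sync units and is satisfied by the first one that is both enabled and active (here, ntp.service on these Ubuntu hosts). A bash sketch of that probe loop, an illustration of the logic rather than cephadm's Python implementation:

    # Probe candidate time-sync units; succeed on the first enabled+active one.
    found=
    for unit in chrony.service chronyd.service systemd-timesyncd.service ntpd.service ntp.service; do
      if systemctl is-enabled "$unit" >/dev/null 2>&1 && \
         systemctl is-active  "$unit" >/dev/null 2>&1; then
        echo "Unit $unit is enabled and running"
        found=$unit
        break
      fi
    done
    [ -n "$found" ] || { echo "no time sync service found" >&2; exit 1; }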
2026-04-15T13:32:38.110 INFO:teuthology.orchestra.run.vm06.stdout:docker (/usr/bin/docker) is present 2026-04-15T13:32:38.110 INFO:teuthology.orchestra.run.vm06.stdout:systemctl is present 2026-04-15T13:32:38.110 INFO:teuthology.orchestra.run.vm06.stdout:lvcreate is present 2026-04-15T13:32:38.113 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 1 from systemctl is-enabled chrony.service 2026-04-15T13:32:38.113 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Failed to get unit file state for chrony.service: No such file or directory 2026-04-15T13:32:38.116 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 3 from systemctl is-active chrony.service 2026-04-15T13:32:38.116 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout inactive 2026-04-15T13:32:38.119 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 1 from systemctl is-enabled chronyd.service 2026-04-15T13:32:38.119 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Failed to get unit file state for chronyd.service: No such file or directory 2026-04-15T13:32:38.121 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 3 from systemctl is-active chronyd.service 2026-04-15T13:32:38.121 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout inactive 2026-04-15T13:32:38.125 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 1 from systemctl is-enabled systemd-timesyncd.service 2026-04-15T13:32:38.125 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout masked 2026-04-15T13:32:38.127 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 3 from systemctl is-active systemd-timesyncd.service 2026-04-15T13:32:38.127 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout inactive 2026-04-15T13:32:38.129 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 1 from systemctl is-enabled ntpd.service 2026-04-15T13:32:38.129 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Failed to get unit file state for ntpd.service: No such file or directory 2026-04-15T13:32:38.133 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 3 from systemctl is-active ntpd.service 2026-04-15T13:32:38.133 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout inactive 2026-04-15T13:32:38.136 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout enabled 2026-04-15T13:32:38.139 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout active 2026-04-15T13:32:38.139 INFO:teuthology.orchestra.run.vm06.stdout:Unit ntp.service is enabled and running 2026-04-15T13:32:38.139 INFO:teuthology.orchestra.run.vm06.stdout:Host looks OK 2026-04-15T13:32:38.139 INFO:teuthology.orchestra.run.vm06.stdout:Cluster fsid: 75e42418-38cf-11f1-9300-4fe77ac4445b 2026-04-15T13:32:38.139 INFO:teuthology.orchestra.run.vm06.stdout:Acquiring lock 140355670893488 on /run/cephadm/75e42418-38cf-11f1-9300-4fe77ac4445b.lock 2026-04-15T13:32:38.139 INFO:teuthology.orchestra.run.vm06.stdout:Lock 140355670893488 acquired on /run/cephadm/75e42418-38cf-11f1-9300-4fe77ac4445b.lock 2026-04-15T13:32:38.139 INFO:teuthology.orchestra.run.vm06.stdout:Verifying IP 192.168.123.106 port 3300 ... 2026-04-15T13:32:38.139 INFO:teuthology.orchestra.run.vm06.stdout:Verifying IP 192.168.123.106 port 6789 ... 
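Before laying down the mon, cephadm takes a per-fsid advisory lock under /run/cephadm and verifies that the mon ports (3300 msgr2, 6789 msgr1) are free on the mon IP. A shell sketch of both checks using flock(1) and bash's /dev/tcp, purely illustrative of what the Python code is doing (run as root so /run/cephadm is writable):

    fsid=75e42418-38cf-11f1-9300-4fe77ac4445b
    mon_ip=192.168.123.106
    (
      flock -n 9 || { echo "another cephadm holds the $fsid lock" >&2; exit 1; }
      for port in 3300 6789; do
        # a successful connect means something already listens there
        if timeout 1 bash -c "exec 3<>/dev/tcp/$mon_ip/$port" 2>/dev/null; then
          echo "port $port on $mon_ip is already in use" >&2
          exit 1
        fi
      done
    ) 9>"/run/cephadm/$fsid.lock"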
2026-04-15T13:32:38.139 INFO:teuthology.orchestra.run.vm06.stdout:Base mon IP(s) is [192.168.123.106:3300, 192.168.123.106:6789], mon addrv is [v2:192.168.123.106:3300,v1:192.168.123.106:6789] 2026-04-15T13:32:38.140 INFO:teuthology.orchestra.run.vm06.stdout:/usr/sbin/ip: stdout default via 192.168.123.1 dev ens3 proto dhcp src 192.168.123.106 metric 100 2026-04-15T13:32:38.140 INFO:teuthology.orchestra.run.vm06.stdout:/usr/sbin/ip: stdout 172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown 2026-04-15T13:32:38.141 INFO:teuthology.orchestra.run.vm06.stdout:/usr/sbin/ip: stdout 192.168.123.0/24 dev ens3 proto kernel scope link src 192.168.123.106 metric 100 2026-04-15T13:32:38.141 INFO:teuthology.orchestra.run.vm06.stdout:/usr/sbin/ip: stdout 192.168.123.1 dev ens3 proto dhcp scope link src 192.168.123.106 metric 100 2026-04-15T13:32:38.142 INFO:teuthology.orchestra.run.vm06.stdout:/usr/sbin/ip: stdout ::1 dev lo proto kernel metric 256 pref medium 2026-04-15T13:32:38.142 INFO:teuthology.orchestra.run.vm06.stdout:/usr/sbin/ip: stdout fe80::/64 dev ens3 proto kernel metric 256 pref medium 2026-04-15T13:32:38.143 INFO:teuthology.orchestra.run.vm06.stdout:/usr/sbin/ip: stdout 1: lo: mtu 65536 state UNKNOWN qlen 1000 2026-04-15T13:32:38.143 INFO:teuthology.orchestra.run.vm06.stdout:/usr/sbin/ip: stdout inet6 ::1/128 scope host 2026-04-15T13:32:38.143 INFO:teuthology.orchestra.run.vm06.stdout:/usr/sbin/ip: stdout valid_lft forever preferred_lft forever 2026-04-15T13:32:38.144 INFO:teuthology.orchestra.run.vm06.stdout:/usr/sbin/ip: stdout 2: ens3: mtu 1500 state UP qlen 1000 2026-04-15T13:32:38.144 INFO:teuthology.orchestra.run.vm06.stdout:/usr/sbin/ip: stdout inet6 fe80::5055:ff:fe00:6/64 scope link 2026-04-15T13:32:38.144 INFO:teuthology.orchestra.run.vm06.stdout:/usr/sbin/ip: stdout valid_lft forever preferred_lft forever 2026-04-15T13:32:38.144 INFO:teuthology.orchestra.run.vm06.stdout:Mon IP `192.168.123.106` is in CIDR network `192.168.123.0/24` 2026-04-15T13:32:38.144 INFO:teuthology.orchestra.run.vm06.stdout:Mon IP `192.168.123.106` is in CIDR network `192.168.123.0/24` 2026-04-15T13:32:38.144 INFO:teuthology.orchestra.run.vm06.stdout:Mon IP `192.168.123.106` is in CIDR network `192.168.123.1/32` 2026-04-15T13:32:38.144 INFO:teuthology.orchestra.run.vm06.stdout:Mon IP `192.168.123.106` is in CIDR network `192.168.123.1/32` 2026-04-15T13:32:38.144 INFO:teuthology.orchestra.run.vm06.stdout:Inferred mon public CIDR from local network configuration ['192.168.123.0/24', '192.168.123.0/24', '192.168.123.1/32', '192.168.123.1/32'] 2026-04-15T13:32:38.144 INFO:teuthology.orchestra.run.vm06.stdout:Internal network (--cluster-network) has not been provided, OSD replication will default to the public_network 2026-04-15T13:32:38.144 INFO:teuthology.orchestra.run.vm06.stdout:Pulling container image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5... 
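With no --cluster-network given, cephadm infers the mon public network by matching the mon IP against the local routing table, which is why the log shows the same CIDR matched repeatedly (one hit per probed route) before settling on 192.168.123.0/24. The same inference from the shell, as an illustration:

    # Which local route / interface prefix covers the mon IP?
    mon_ip=192.168.123.106
    ip route get "$mon_ip" | head -1
    ip -o -4 addr show | awk '{print $4}'   # e.g. 192.168.123.106/24 on ens3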
2026-04-15T13:32:38.526 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/docker: stdout sse-s3-kmip-preview-not-for-production-5: Pulling from custom-ceph/ceph/ceph 2026-04-15T13:32:38.526 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/docker: stdout Digest: sha256:b4cb326006c035fcaccf517a7733ba26fcc96dafbf1f00ae8ac89d843a9451a9 2026-04-15T13:32:38.526 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/docker: stdout Status: Image is up to date for harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 2026-04-15T13:32:38.526 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/docker: stdout harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 2026-04-15T13:32:38.830 INFO:teuthology.orchestra.run.vm06.stdout:ceph: stdout ceph version 20.2.0-19-g7ec4401a095 (7ec4401a095f03c389fcf6df60e966f86395fb86) tentacle (stable) 2026-04-15T13:32:38.830 INFO:teuthology.orchestra.run.vm06.stdout:Ceph version: ceph version 20.2.0-19-g7ec4401a095 (7ec4401a095f03c389fcf6df60e966f86395fb86) tentacle (stable) 2026-04-15T13:32:38.830 INFO:teuthology.orchestra.run.vm06.stdout:Extracting ceph user uid/gid from container image... 2026-04-15T13:32:38.931 INFO:teuthology.orchestra.run.vm06.stdout:stat: stdout 167 167 2026-04-15T13:32:38.931 INFO:teuthology.orchestra.run.vm06.stdout:Creating initial keys... 2026-04-15T13:32:39.096 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-authtool: stdout AQB3k99pNKaFAhAA55NFC9GM7LNaaMi/U0KCLQ== 2026-04-15T13:32:39.209 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-authtool: stdout AQB3k99p8ZWxChAACLsQtizJGNxAxTdllIe69A== 2026-04-15T13:32:39.322 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-authtool: stdout AQB3k99pNORqERAAdyJxnzG3QBSWo9p9Hlq1OQ== 2026-04-15T13:32:39.322 INFO:teuthology.orchestra.run.vm06.stdout:Creating initial monmap... 2026-04-15T13:32:39.438 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: monmap file /tmp/monmap 2026-04-15T13:32:39.438 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/monmaptool: stdout setting min_mon_release = tentacle 2026-04-15T13:32:39.438 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: set fsid to 75e42418-38cf-11f1-9300-4fe77ac4445b 2026-04-15T13:32:39.438 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/monmaptool: stdout /usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-04-15T13:32:39.438 INFO:teuthology.orchestra.run.vm06.stdout:monmaptool for vm06 [v2:192.168.123.106:3300,v1:192.168.123.106:6789] on /usr/bin/monmaptool: monmap file /tmp/monmap 2026-04-15T13:32:39.438 INFO:teuthology.orchestra.run.vm06.stdout:setting min_mon_release = tentacle 2026-04-15T13:32:39.439 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/monmaptool: set fsid to 75e42418-38cf-11f1-9300-4fe77ac4445b 2026-04-15T13:32:39.439 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/monmaptool: writing epoch 0 to /tmp/monmap (1 monitors) 2026-04-15T13:32:39.439 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:32:39.439 INFO:teuthology.orchestra.run.vm06.stdout:Creating mon... 
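The three ceph-authtool secrets printed above (presumably the mon, client.admin and mgr keys; they are throwaway test credentials for this disposable cluster) are generated before the epoch-0 monmap is written with monmaptool. A sketch of the monmap step with the fsid and addrv from the log; cephadm also sets min_mon_release (flag omitted here), so the exact flag set it passes may differ:

    # Build an epoch-0 monmap for the bootstrap mon.
    fsid=75e42418-38cf-11f1-9300-4fe77ac4445b
    monmaptool --create --clobber \
      --fsid "$fsid" \
      --addv vm06 '[v2:192.168.123.106:3300,v1:192.168.123.106:6789]' \
      /tmp/monmap
    monmaptool --print /tmp/monmap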
2026-04-15T13:32:39.583 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.524+0000 7f96aa8cfd40 0 set uid:gid to 167:167 (ceph:ceph) 2026-04-15T13:32:39.583 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.528+0000 7f96aa8cfd40 1 imported monmap: 2026-04-15T13:32:39.583 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr epoch 0 2026-04-15T13:32:39.583 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr fsid 75e42418-38cf-11f1-9300-4fe77ac4445b 2026-04-15T13:32:39.583 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr last_changed 2026-04-15T13:32:39.407888+0000 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr created 2026-04-15T13:32:39.407888+0000 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr min_mon_release 20 (tentacle) 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr election_strategy: 1 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr 0: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.vm06 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.528+0000 7f96aa8cfd40 0 /usr/bin/ceph-mon: set fsid to 75e42418-38cf-11f1-9300-4fe77ac4445b 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: RocksDB version: 7.9.2 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Git sha 0 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Compile date 2026-04-14 11:30:02 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: DB SUMMARY 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: DB Session ID: DSHKBJ5G09XH683ZDMO0 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: SST files in /var/lib/ceph/mon/ceph-vm06/store.db dir, Total Num: 0, files: 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-vm06/store.db: 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.error_if_exists: 0 2026-04-15T13:32:39.584 
INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.create_if_missing: 1 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.paranoid_checks: 1 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.flush_verify_memtable_count: 1 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.env: 0x55ecabe6a440 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.fs: PosixFileSystem 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.info_log: 0x55ecd0afdc20 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_file_opening_threads: 16 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.statistics: (nil) 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.use_fsync: 0 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_log_file_size: 0 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_manifest_file_size: 1073741824 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.log_file_time_to_roll: 0 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.keep_log_file_num: 1000 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.recycle_log_file_num: 0 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.allow_fallocate: 1 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.allow_mmap_reads: 0 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.allow_mmap_writes: 0 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 
2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.use_direct_reads: 0 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.create_missing_column_families: 0 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.db_log_dir: 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.wal_dir: 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.table_cache_numshardbits: 6 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.WAL_ttl_seconds: 0 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.WAL_size_limit_MB: 0 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.manifest_preallocation_size: 4194304 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.is_fd_close_on_exec: 1 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.advise_random_on_open: 1 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.db_write_buffer_size: 0 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.write_buffer_manager: 0x55ecd0af0780 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.access_hint_on_compaction_start: 1 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.random_access_max_buffer_size: 1048576 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.use_adaptive_mutex: 0 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.rate_limiter: (nil) 2026-04-15T13:32:39.584 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 
2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.wal_recovery_mode: 2 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.enable_thread_tracking: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.enable_pipelined_write: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.unordered_write: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.allow_concurrent_memtable_write: 1 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.write_thread_max_yield_usec: 100 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.write_thread_slow_yield_usec: 3 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.row_cache: None 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.wal_filter: None 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.avoid_flush_during_recovery: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.allow_ingest_behind: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.two_write_queues: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.manual_wal_flush: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.wal_compression: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.atomic_flush: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.persist_stats_to_disk: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.write_dbid_to_manifest: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: 
Options.log_readahead_size: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.file_checksum_gen_factory: Unknown 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.best_efforts_recovery: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.allow_data_in_errors: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.db_host_id: __hostname__ 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.enforce_single_del_contracts: true 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_background_jobs: 2 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_background_compactions: -1 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_subcompactions: 1 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.avoid_flush_during_shutdown: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.delayed_write_rate : 16777216 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_total_wal_size: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.stats_dump_period_sec: 600 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.stats_persist_period_sec: 600 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.stats_history_buffer_size: 1048576 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 
2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_open_files: -1 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.bytes_per_sync: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.wal_bytes_per_sync: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.strict_bytes_per_sync: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compaction_readahead_size: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_background_flushes: -1 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Compression algorithms supported: 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: kZSTD supported: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: kXpressCompression supported: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: kBZip2Compression supported: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: kZSTDNotFinalCompression supported: 0 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: kLZ4Compression supported: 1 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: kZlibCompression supported: 1 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: kLZ4HCCompression supported: 1 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: kSnappyCompression supported: 1 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Fast CRC32 supported: Supported on x86 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: DMutex implementation: pthread_mutex_t 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: [db/db_impl/db_impl_open.cc:317] Creating manifest 1 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: [db/version_set.cc:5527] Recovering from manifest file: 
/var/lib/ceph/mon/ceph-vm06/store.db/MANIFEST-000001 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr 2026-04-15T13:32:39.585 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.merge_operator: 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compaction_filter: None 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compaction_filter_factory: None 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.sst_partitioner_factory: None 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.memtable_factory: SkipListFactory 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.table_factory: BlockBasedTable 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55ecd0aedc20) 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr cache_index_and_filter_blocks: 1 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr cache_index_and_filter_blocks_with_high_priority: 0 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr pin_l0_filter_and_index_blocks_in_cache: 0 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr pin_top_level_index_and_filter: 1 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr index_type: 0 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr data_block_index_type: 0 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr index_shortening: 1 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr data_block_hash_table_util_ratio: 0.750000 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr checksum: 4 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr no_block_cache: 0 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr block_cache: 0x55ecd0ae38d0 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr block_cache_name: BinnedLRUCache 2026-04-15T13:32:39.586 
INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr block_cache_options: 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr capacity : 536870912 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr num_shard_bits : 4 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr strict_capacity_limit : 0 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr high_pri_pool_ratio: 0.000 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr block_cache_compressed: (nil) 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr persistent_cache: (nil) 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr block_size: 4096 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr block_size_deviation: 10 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr block_restart_interval: 16 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr index_block_restart_interval: 1 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr metadata_block_size: 4096 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr partition_filters: 0 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr use_delta_encoding: 1 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr filter_policy: bloomfilter 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr whole_key_filtering: 1 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr verify_compression: 0 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr read_amp_bytes_per_bit: 0 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr format_version: 5 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr enable_index_compression: 1 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr block_align: 0 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr max_auto_readahead_size: 262144 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr prepopulate_block_cache: 0 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr initial_auto_readahead_size: 8192 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr num_file_reads_for_auto_readahead: 2 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.write_buffer_size: 33554432 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_write_buffer_number: 2 2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compression: NoCompression 
2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.bottommost_compression: Disabled
2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.prefix_extractor: nullptr
2026-04-15T13:32:39.586 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.num_levels: 7
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.min_write_buffer_number_to_merge: 1
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_write_buffer_number_to_maintain: 0
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_write_buffer_size_to_maintain: 0
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.bottommost_compression_opts.window_bits: -14
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.bottommost_compression_opts.level: 32767
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.bottommost_compression_opts.strategy: 0
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.bottommost_compression_opts.parallel_threads: 1
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.bottommost_compression_opts.enabled: false
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compression_opts.window_bits: -14
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compression_opts.level: 32767
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compression_opts.strategy: 0
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compression_opts.max_dict_bytes: 0
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compression_opts.zstd_max_train_bytes: 0
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compression_opts.use_zstd_dict_trainer: true
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compression_opts.parallel_threads: 1
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compression_opts.enabled: false
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.level0_file_num_compaction_trigger: 4
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.level0_slowdown_writes_trigger: 20
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.level0_stop_writes_trigger: 36
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.target_file_size_base: 67108864
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.target_file_size_multiplier: 1
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_bytes_for_level_base: 268435456
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.level_compaction_dynamic_level_bytes: 1
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_bytes_for_level_multiplier: 10.000000
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_sequential_skip_in_iterations: 8
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_compaction_bytes: 1677721600
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.ignore_max_compaction_bytes_for_input: true
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.arena_block_size: 1048576
2026-04-15T13:32:39.587 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.disable_auto_compactions: 0
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compaction_style: kCompactionStyleLevel
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compaction_pri: kMinOverlappingRatio
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compaction_options_universal.size_ratio: 1
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compaction_options_universal.min_merge_width: 2
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compaction_options_universal.compression_size_percent: -1
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.compaction_options_fifo.allow_compaction: 0
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0);
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.inplace_update_support: 0
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.inplace_update_num_locks: 10000
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.memtable_whole_key_filtering: 0
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.memtable_huge_page_size: 0
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.bloom_locality: 0
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.max_successive_merges: 0
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.optimize_filters_for_hits: 0
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.paranoid_file_checks: 0
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.force_consistency_checks: 1
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.report_bg_io_stats: 0
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.ttl: 2592000
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.periodic_compaction_seconds: 0
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.preclude_last_level_data_seconds: 0
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.preserve_internal_time_seconds: 0
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.enable_blob_files: false
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.min_blob_size: 0
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.blob_file_size: 268435456
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.blob_compression_type: NoCompression
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.enable_blob_garbage_collection: false
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.blob_compaction_readahead_size: 0
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.blob_file_starting_level: 0
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: Options.experimental_mempurge_threshold: 0.000000
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-vm06/store.db/MANIFEST-000001 succeeded,manifest_file_number is 1, next_file_number is 3, last_sequence is 0, log_number is 0,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 0
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 0
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 39f4c017-0854-4cc3-bbf2-2c1828b86433
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.532+0000 7f96aa8cfd40 4 rocksdb: [db/version_set.cc:5047] Creating manifest 5
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.536+0000 7f96aa8cfd40 4 rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x55ecd0b0ee00
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.536+0000 7f96aa8cfd40 4 rocksdb: DB pointer 0x55ecd0c38000
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.536+0000 7f96a2059640 4 rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.536+0000 7f96a2059640 4 rocksdb: [db/db_impl/db_impl.cc:1111]
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr ** DB Stats **
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr Uptime(secs): 0.0 total, 0.0 interval
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr Interval stall: 00:00:0.000 H:M:S, 0.0 percent
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr ** Compaction Stats [default] **
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-04-15T13:32:39.588 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr Sum 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.00 0.00 0 0.000 0 0 0.0 0.0
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr ** Compaction Stats [default] **
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr Uptime(secs): 0.0 total, 0.0 interval
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr Flush(GB): cumulative 0.000, interval 0.000
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr AddFile(GB): cumulative 0.000, interval 0.000
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr AddFile(Total Files): cumulative 0, interval 0
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr AddFile(L0 Files): cumulative 0, interval 0
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr AddFile(Keys): cumulative 0, interval 0
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr Cumulative compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr Interval compaction: 0.00 GB write, 0.00 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr Block cache BinnedLRUCache@0x55ecd0ae38d0#7 capacity: 512.00 MB usage: 0.00 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 1.7e-05 secs_since: 0
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr Block cache entry stats(count,size,portion): Misc(1,0.00 KB,0%)
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr ** File Read Latency Histogram By Level [default] **
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.540+0000 7f96aa8cfd40 4 rocksdb: [db/db_impl/db_impl.cc:496] Shutdown: canceling all background work
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.540+0000 7f96aa8cfd40 4 rocksdb: [db/db_impl/db_impl.cc:704] Shutdown complete
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph-mon: stderr debug 2026-04-15T13:32:39.540+0000 7f96aa8cfd40 0 /usr/bin/ceph-mon: created monfs at /var/lib/ceph/mon/ceph-vm06 for mon.vm06
2026-04-15T13:32:39.589 INFO:teuthology.orchestra.run.vm06.stdout:create mon.vm06 on
2026-04-15T13:32:39.908 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph.target → /etc/systemd/system/ceph.target.
2026-04-15T13:32:40.113 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Created symlink /etc/systemd/system/multi-user.target.wants/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b.target → /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b.target.
2026-04-15T13:32:40.113 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph.target.wants/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b.target → /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b.target.
2026-04-15T13:32:40.325 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@mon.vm06
2026-04-15T13:32:40.325 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Failed to reset failed state of unit ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@mon.vm06.service: Unit ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@mon.vm06.service not loaded.
2026-04-15T13:32:40.519 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b.target.wants/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@mon.vm06.service → /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service.
2026-04-15T13:32:40.531 INFO:teuthology.orchestra.run.vm06.stdout:firewalld does not appear to be present
2026-04-15T13:32:40.531 INFO:teuthology.orchestra.run.vm06.stdout:Not possible to enable service . firewalld.service is not available
2026-04-15T13:32:40.531 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for mon to start...
2026-04-15T13:32:40.531 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for mon...
2026-04-15T13:32:40.899 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout cluster:
2026-04-15T13:32:40.899 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout id: 75e42418-38cf-11f1-9300-4fe77ac4445b
2026-04-15T13:32:40.899 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout health: HEALTH_OK
2026-04-15T13:32:40.899 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout
2026-04-15T13:32:40.899 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout services:
2026-04-15T13:32:40.899 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon: 1 daemons, quorum vm06 (age 0.134076s) [leader: vm06]
2026-04-15T13:32:40.899 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mgr: no daemons active
2026-04-15T13:32:40.899 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout osd: 0 osds: 0 up, 0 in
2026-04-15T13:32:40.899 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout
2026-04-15T13:32:40.899 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout data:
2026-04-15T13:32:40.899 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout pools: 0 pools, 0 pgs
2026-04-15T13:32:40.899 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout objects: 0 objects, 0 B
2026-04-15T13:32:40.899 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout usage: 0 B used, 0 B / 0 B avail
2026-04-15T13:32:40.899 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout pgs:
2026-04-15T13:32:40.899 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout
2026-04-15T13:32:40.899 INFO:teuthology.orchestra.run.vm06.stdout:mon is available
2026-04-15T13:32:40.899 INFO:teuthology.orchestra.run.vm06.stdout:Assimilating anything we can from ceph.conf...
2026-04-15T13:32:41.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:40 vm06 bash[27614]: cluster 2026-04-15T13:32:40.715626+0000 mon.vm06 (mon.0) 1 : cluster [INF] mon.vm06 is new leader, mons vm06 in quorum (ranks 0)
2026-04-15T13:32:41.235 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout
2026-04-15T13:32:41.235 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout [global]
2026-04-15T13:32:41.235 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout fsid = 75e42418-38cf-11f1-9300-4fe77ac4445b
2026-04-15T13:32:41.235 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug
2026-04-15T13:32:41.235 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.106:3300,v1:192.168.123.106:6789]
2026-04-15T13:32:41.235 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true
2026-04-15T13:32:41.235 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true
2026-04-15T13:32:41.235 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false
2026-04-15T13:32:41.235 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0
2026-04-15T13:32:41.235 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout
2026-04-15T13:32:41.235 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout [mgr]
2026-04-15T13:32:41.235 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false
2026-04-15T13:32:41.235 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout
2026-04-15T13:32:41.235 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout [osd]
2026-04-15T13:32:41.235 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10
2026-04-15T13:32:41.235 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true
2026-04-15T13:32:41.235 INFO:teuthology.orchestra.run.vm06.stdout:Generating new minimal ceph.conf...
2026-04-15T13:32:41.574 INFO:teuthology.orchestra.run.vm06.stdout:Restarting the monitor...
2026-04-15T13:32:41.731 INFO:teuthology.orchestra.run.vm06.stdout:Setting public_network to 192.168.123.0/24,192.168.123.1/32 in global config section
2026-04-15T13:32:41.791 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 systemd[1]: Stopping Ceph mon.vm06 for 75e42418-38cf-11f1-9300-4fe77ac4445b...
2026-04-15T13:32:41.791 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[27614]: debug 2026-04-15T13:32:41.612+0000 7f78740dd640 -1 received signal: Terminated from /sbin/docker-init -- /usr/bin/ceph-mon -n mon.vm06 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-stderr=true --default-log-stderr-prefix=debug --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-stderr=true (PID: 1) UID: 0
2026-04-15T13:32:41.791 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[27614]: debug 2026-04-15T13:32:41.612+0000 7f78740dd640 -1 mon.vm06@0(leader) e1 *** Got Signal Terminated ***
2026-04-15T13:32:41.791 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28018]: ceph-75e42418-38cf-11f1-9300-4fe77ac4445b-mon-vm06
2026-04-15T13:32:41.791 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 systemd[1]: ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@mon.vm06.service: Deactivated successfully.
2026-04-15T13:32:41.791 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 systemd[1]: Stopped Ceph mon.vm06 for 75e42418-38cf-11f1-9300-4fe77ac4445b.
2026-04-15T13:32:41.791 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 systemd[1]: Started Ceph mon.vm06 for 75e42418-38cf-11f1-9300-4fe77ac4445b.
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.872+0000 7f6b1914dd40 0 set uid:gid to 167:167 (ceph:ceph)
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.872+0000 7f6b1914dd40 0 ceph version 20.2.0-19-g7ec4401a095 (7ec4401a095f03c389fcf6df60e966f86395fb86) tentacle (stable - RelWithDebInfo), process ceph-mon, pid 6
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.872+0000 7f6b1914dd40 0 pidfile_write: ignore empty --pid-file
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 0 load: jerasure load: lrc
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: RocksDB version: 7.9.2
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Git sha 0
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Compile date 2026-04-14 11:30:02
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: DB SUMMARY
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: DB Session ID: W2BFWFHC8ZEYUJM6JBFA
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: CURRENT file: CURRENT
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: IDENTITY file: IDENTITY
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: MANIFEST file: MANIFEST-000010 size: 179 Bytes
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: SST files in /var/lib/ceph/mon/ceph-vm06/store.db dir, Total Num: 1, files: 000008.sst
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-vm06/store.db: 000009.log size: 76257 ;
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.error_if_exists: 0
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.create_if_missing: 0
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.paranoid_checks: 1
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.flush_verify_memtable_count: 1
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.track_and_verify_wals_in_manifest: 0
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.verify_sst_unique_id_in_manifest: 1
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.env: 0x56101038b440
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.fs: PosixFileSystem
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.info_log: 0x561020597f00
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_file_opening_threads: 16
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.statistics: (nil)
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.use_fsync: 0
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_log_file_size: 0
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_manifest_file_size: 1073741824
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.log_file_time_to_roll: 0
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.keep_log_file_num: 1000
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.recycle_log_file_num: 0
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.allow_fallocate: 1
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.allow_mmap_reads: 0
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.allow_mmap_writes: 0
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.use_direct_reads: 0
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.use_direct_io_for_flush_and_compaction: 0
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.create_missing_column_families: 0
2026-04-15T13:32:42.056 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.db_log_dir:
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.wal_dir:
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.table_cache_numshardbits: 6
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.WAL_ttl_seconds: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.WAL_size_limit_MB: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_write_batch_group_size_bytes: 1048576
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.manifest_preallocation_size: 4194304
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.is_fd_close_on_exec: 1
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.advise_random_on_open: 1
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.db_write_buffer_size: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.write_buffer_manager: 0x56102059a500
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.access_hint_on_compaction_start: 1
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.random_access_max_buffer_size: 1048576
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.use_adaptive_mutex: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.rate_limiter: (nil)
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.wal_recovery_mode: 2
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.enable_thread_tracking: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.enable_pipelined_write: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.unordered_write: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.allow_concurrent_memtable_write: 1
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.enable_write_thread_adaptive_yield: 1
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.write_thread_max_yield_usec: 100
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.write_thread_slow_yield_usec: 3
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.row_cache: None
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.wal_filter: None
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.avoid_flush_during_recovery: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.allow_ingest_behind: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.two_write_queues: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.manual_wal_flush: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.wal_compression: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.atomic_flush: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.avoid_unnecessary_blocking_io: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.persist_stats_to_disk: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.write_dbid_to_manifest: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.log_readahead_size: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.file_checksum_gen_factory: Unknown
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.best_efforts_recovery: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_bgerror_resume_count: 2147483647
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.bgerror_resume_retry_interval: 1000000
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.allow_data_in_errors: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.db_host_id: __hostname__
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.enforce_single_del_contracts: true
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_background_jobs: 2
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_background_compactions: -1
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_subcompactions: 1
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.avoid_flush_during_shutdown: 0
2026-04-15T13:32:42.057 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.writable_file_max_buffer_size: 1048576
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.delayed_write_rate : 16777216
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_total_wal_size: 0
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.delete_obsolete_files_period_micros: 21600000000
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.stats_dump_period_sec: 600
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.stats_persist_period_sec: 600
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.stats_history_buffer_size: 1048576
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_open_files: -1
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.bytes_per_sync: 0
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.wal_bytes_per_sync: 0
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.strict_bytes_per_sync: 0
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compaction_readahead_size: 0
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_background_flushes: -1
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Compression algorithms supported:
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: kZSTD supported: 0
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: kXpressCompression supported: 0
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: kBZip2Compression supported: 0
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: kZSTDNotFinalCompression supported: 0
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: kLZ4Compression supported: 1
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: kZlibCompression supported: 1
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: kLZ4HCCompression supported: 1
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: kSnappyCompression supported: 1
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Fast CRC32 supported: Supported on x86
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: DMutex implementation: pthread_mutex_t
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-vm06/store.db/MANIFEST-000010
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]:
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.comparator: leveldb.BytewiseComparator
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.merge_operator:
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compaction_filter: None
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compaction_filter_factory: None
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.sst_partitioner_factory: None
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.memtable_factory: SkipListFactory
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.table_factory: BlockBasedTable
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x561020596ec0)
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cache_index_and_filter_blocks: 1
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cache_index_and_filter_blocks_with_high_priority: 0
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: pin_l0_filter_and_index_blocks_in_cache: 0
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: pin_top_level_index_and_filter: 1
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: index_type: 0
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: data_block_index_type: 0
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: index_shortening: 1
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: data_block_hash_table_util_ratio: 0.750000
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: checksum: 4
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: no_block_cache: 0
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: block_cache: 0x56102058d8d0
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: block_cache_name: BinnedLRUCache
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: block_cache_options:
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: capacity : 536870912
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: num_shard_bits : 4
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: strict_capacity_limit : 0
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: high_pri_pool_ratio: 0.000
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: block_cache_compressed: (nil)
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: persistent_cache: (nil)
2026-04-15T13:32:42.058 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: block_size: 4096
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: block_size_deviation: 10
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: block_restart_interval: 16
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: index_block_restart_interval: 1
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: metadata_block_size: 4096
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: partition_filters: 0
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: use_delta_encoding: 1
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: filter_policy: bloomfilter
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: whole_key_filtering: 1
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: verify_compression: 0
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: read_amp_bytes_per_bit: 0
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: format_version: 5
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: enable_index_compression: 1
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: block_align: 0
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: max_auto_readahead_size: 262144
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: prepopulate_block_cache: 0
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: initial_auto_readahead_size: 8192
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: num_file_reads_for_auto_readahead: 2
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.write_buffer_size: 33554432
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_write_buffer_number: 2
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compression: NoCompression
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.bottommost_compression: Disabled
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.prefix_extractor: nullptr
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.num_levels: 7
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.min_write_buffer_number_to_merge: 1
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_write_buffer_number_to_maintain: 0
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_write_buffer_size_to_maintain: 0
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.bottommost_compression_opts.window_bits: -14
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.bottommost_compression_opts.level: 32767
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.bottommost_compression_opts.strategy: 0
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.bottommost_compression_opts.parallel_threads: 1
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.bottommost_compression_opts.enabled: false
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compression_opts.window_bits: -14
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compression_opts.level: 32767
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compression_opts.strategy: 0
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compression_opts.max_dict_bytes: 0
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compression_opts.zstd_max_train_bytes: 0
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compression_opts.use_zstd_dict_trainer: true
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compression_opts.parallel_threads: 1
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compression_opts.enabled: false
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.level0_file_num_compaction_trigger: 4
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.level0_slowdown_writes_trigger: 20
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.level0_stop_writes_trigger: 36
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.target_file_size_base: 67108864
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.target_file_size_multiplier: 1
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_bytes_for_level_base: 268435456
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.level_compaction_dynamic_level_bytes: 1
2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug
2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-04-15T13:32:42.059 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_compaction_bytes: 1677721600 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.arena_block_size: 1048576 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.disable_auto_compactions: 0 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 
7f6b1914dd40 4 rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.inplace_update_support: 0 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.inplace_update_num_locks: 10000 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.memtable_whole_key_filtering: 0 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.memtable_huge_page_size: 0 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.bloom_locality: 0 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.max_successive_merges: 0 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.optimize_filters_for_hits: 0 2026-04-15T13:32:42.060 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.paranoid_file_checks: 0 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.force_consistency_checks: 1 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.report_bg_io_stats: 0 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.ttl: 2592000 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.periodic_compaction_seconds: 0 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.preclude_last_level_data_seconds: 0 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.preserve_internal_time_seconds: 0 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.enable_blob_files: false 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.min_blob_size: 0 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.blob_file_size: 268435456 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.blob_compression_type: NoCompression 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.enable_blob_garbage_collection: false 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.blob_compaction_readahead_size: 0 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.blob_file_starting_level: 0 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: [db/version_set.cc:5566] Recovered from manifest 
file:/var/lib/ceph/mon/ceph-vm06/store.db/MANIFEST-000010 succeeded,manifest_file_number is 10, next_file_number is 12, last_sequence is 5, log_number is 5,prev_log_number is 0,max_column_family is 0,min_log_number_to_keep is 5 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 5 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 39f4c017-0854-4cc3-bbf2-2c1828b86433 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1776259961880374, "job": 1, "event": "recovery_started", "wal_files": [9]} 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.876+0000 7f6b1914dd40 4 rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #9 mode 2 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.880+0000 7f6b1914dd40 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1776259961882852, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 13, "file_size": 73352, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 8, "largest_seqno": 227, "table_properties": {"data_size": 71638, "index_size": 167, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 517, "raw_key_size": 9666, "raw_average_key_size": 48, "raw_value_size": 66145, "raw_average_value_size": 334, "num_data_blocks": 8, "num_entries": 198, "num_filter_entries": 198, "num_deletions": 3, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1776259961, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "39f4c017-0854-4cc3-bbf2-2c1828b86433", "db_session_id": "W2BFWFHC8ZEYUJM6JBFA", "orig_file_number": 13, "seqno_to_time_mapping": "N/A"}} 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.880+0000 7f6b1914dd40 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1776259961883005, "job": 1, "event": "recovery_finished"} 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.880+0000 7f6b1914dd40 4 rocksdb: [db/version_set.cc:5047] Creating manifest 15 2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.884+0000 7f6b1914dd40 4 rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-vm06/store.db/000009.log immediately, 
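The EVENT_LOG_v1 records above (recovery_started, table_file_creation, recovery_finished) are plain JSON after a fixed marker, so they can be mined straight out of a journalctl-wrapped mon log like this one. A minimal sketch in Python, assuming the log has been saved to a file with one record per line; the marker string and field names are taken from the records above, and anything else is illustrative:

# Extract RocksDB EVENT_LOG_v1 JSON records from a mon log file.
import json
import sys

MARKER = "EVENT_LOG_v1 "

def rocksdb_events(path):
    """Yield each parsed EVENT_LOG_v1 object found in the log at `path`."""
    with open(path) as fh:
        for line in fh:
            _, sep, payload = line.partition(MARKER)
            if not sep:
                continue  # no marker on this line
            try:
                yield json.loads(payload)
            except json.JSONDecodeError:
                continue  # record truncated or wrapped; skip it

if __name__ == "__main__":
    for ev in rocksdb_events(sys.argv[1]):
        # e.g. recovery_started / table_file_creation / recovery_finished
        print(ev.get("event"), ev.get("file_size", ""))

For the recovery above, this would print the single table_file_creation event with file_size 73352 between the recovery_started and recovery_finished markers.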
2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.884+0000 7f6b1914dd40 4 rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x5610205b8e00
2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.884+0000 7f6b1914dd40 4 rocksdb: DB pointer 0x561020708000
2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.884+0000 7f6b0eef3640 4 rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS -------
2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.884+0000 7f6b0eef3640 4 rocksdb: [db/db_impl/db_impl.cc:1111]
2026-04-15T13:32:42.060 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: ** DB Stats **
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: Uptime(secs): 0.0 total, 0.0 interval
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: Interval stall: 00:00:0.000 H:M:S, 0.0 percent
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: ** Compaction Stats [default] **
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: L0 2/0 73.51 KB 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 35.8 0.00 0.00 1 0.002 0 0 0.0 0.0
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: Sum 2/0 73.51 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 35.8 0.00 0.00 1 0.002 0 0 0.0 0.0
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 35.8 0.00 0.00 1 0.002 0 0 0.0 0.0
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: ** Compaction Stats [default] **
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB)
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 35.8 0.00 0.00 1 0.002 0 0 0.0 0.0
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: Uptime(secs): 0.0 total, 0.0 interval
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: Flush(GB): cumulative 0.000, interval 0.000
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: AddFile(GB): cumulative 0.000, interval 0.000
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: AddFile(Total Files): cumulative 0, interval 0
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: AddFile(L0 Files): cumulative 0, interval 0
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: AddFile(Keys): cumulative 0, interval 0
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: Cumulative compaction: 0.00 GB write, 8.79 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: Interval compaction: 0.00 GB write, 8.79 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: Block cache BinnedLRUCache@0x56102058d8d0#6 capacity: 512.00 MB usage: 2.02 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 3.1e-05 secs_since: 0
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: Block cache entry stats(count,size,portion): FilterBlock(2,0.70 KB,0.00013411%) IndexBlock(2,0.36 KB,6.85453e-05%) Misc(2,0.95 KB,0.000181794%)
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: ** File Read Latency Histogram By Level [default] **
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.884+0000 7f6b1914dd40 0 starting mon.vm06 rank 0 at public addrs [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] at bind addrs [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon_data /var/lib/ceph/mon/ceph-vm06 fsid 75e42418-38cf-11f1-9300-4fe77ac4445b
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.884+0000 7f6b1914dd40 1 mon.vm06@-1(???) e1 preinit fsid 75e42418-38cf-11f1-9300-4fe77ac4445b
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.884+0000 7f6b1914dd40 0 mon.vm06@-1(???).mds e1 new map
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.884+0000 7f6b1914dd40 0 mon.vm06@-1(???).mds e1 print_map
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: e1
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: btime 2026-04-15T13:32:40:722083+0000
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: enable_multiple, ever_enabled_multiple: 1,1
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes}
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: legacy client fscid: -1
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]:
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: No filesystems configured
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.884+0000 7f6b1914dd40 0 mon.vm06@-1(???).osd e1 crush map has features 3314932999778484224, adjusting msgr requires
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.884+0000 7f6b1914dd40 0 mon.vm06@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.884+0000 7f6b1914dd40 0 mon.vm06@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.884+0000 7f6b1914dd40 0 mon.vm06@-1(???).osd e1 crush map has features 288514050185494528, adjusting msgr requires
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: debug 2026-04-15T13:32:41.884+0000 7f6b1914dd40 1 mon.vm06@-1(???).paxosservice(auth 1..2) refresh upgraded, format 0 -> 3
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.892862+0000 mon.vm06 (mon.0) 1 : cluster [INF] mon.vm06 is new leader, mons vm06 in quorum (ranks 0)
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.892862+0000 mon.vm06 (mon.0) 1 : cluster [INF] mon.vm06 is new leader, mons vm06 in quorum (ranks 0)
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.892895+0000 mon.vm06 (mon.0) 2 : cluster [DBG] monmap epoch 1
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.892895+0000 mon.vm06 (mon.0) 2 : cluster [DBG] monmap epoch 1
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.892903+0000 mon.vm06 (mon.0) 3 : cluster [DBG] fsid 75e42418-38cf-11f1-9300-4fe77ac4445b
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.892903+0000 mon.vm06 (mon.0) 3 : cluster [DBG] fsid 75e42418-38cf-11f1-9300-4fe77ac4445b
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.892908+0000 mon.vm06 (mon.0) 4 : cluster [DBG] last_changed 2026-04-15T13:32:39.407888+0000
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.892908+0000 mon.vm06 (mon.0) 4 : cluster [DBG] last_changed 2026-04-15T13:32:39.407888+0000
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.892913+0000 mon.vm06 (mon.0) 5 : cluster [DBG] created 2026-04-15T13:32:39.407888+0000
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.892913+0000 mon.vm06 (mon.0) 5 : cluster [DBG] created 2026-04-15T13:32:39.407888+0000
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.892920+0000 mon.vm06 (mon.0) 6 : cluster [DBG] min_mon_release 20 (tentacle)
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.892920+0000 mon.vm06 (mon.0) 6 : cluster [DBG] min_mon_release 20 (tentacle)
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.892932+0000 mon.vm06 (mon.0) 7 : cluster [DBG] election_strategy: 1
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.892932+0000 mon.vm06 (mon.0) 7 : cluster [DBG] election_strategy: 1
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.892937+0000 mon.vm06 (mon.0) 8 : cluster [DBG] 0: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.vm06
2026-04-15T13:32:42.061 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.892937+0000 mon.vm06 (mon.0) 8 : cluster [DBG] 0: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.vm06
2026-04-15T13:32:42.062 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.893271+0000 mon.vm06 (mon.0) 9 : cluster [DBG] fsmap
2026-04-15T13:32:42.062 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.893271+0000 mon.vm06 (mon.0) 9 : cluster [DBG] fsmap
2026-04-15T13:32:42.062 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.893291+0000 mon.vm06 (mon.0) 10 : cluster [DBG] osdmap e1: 0 total, 0 up, 0 in
2026-04-15T13:32:42.062 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.893291+0000 mon.vm06 (mon.0) 10 : cluster [DBG] osdmap e1: 0 total, 0 up, 0 in
2026-04-15T13:32:42.062 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.893896+0000 mon.vm06 (mon.0) 11 : cluster [DBG] mgrmap e1: no daemons active
2026-04-15T13:32:42.062 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:41 vm06 bash[28114]: cluster 2026-04-15T13:32:41.893896+0000 mon.vm06 (mon.0) 11 : cluster [DBG] mgrmap e1: no daemons active
2026-04-15T13:32:42.115 INFO:teuthology.orchestra.run.vm06.stdout:Wrote config to /etc/ceph/ceph.conf
2026-04-15T13:32:42.116 INFO:teuthology.orchestra.run.vm06.stdout:Wrote keyring to /etc/ceph/ceph.client.admin.keyring
2026-04-15T13:32:42.116 INFO:teuthology.orchestra.run.vm06.stdout:Creating mgr...
2026-04-15T13:32:42.116 INFO:teuthology.orchestra.run.vm06.stdout:Verifying port 0.0.0.0:9283 ...
2026-04-15T13:32:42.116 INFO:teuthology.orchestra.run.vm06.stdout:Verifying port 0.0.0.0:8765 ...
2026-04-15T13:32:42.116 INFO:teuthology.orchestra.run.vm06.stdout:Verifying port 0.0.0.0:8443 ...
2026-04-15T13:32:42.324 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:42 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:32:42.330 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 1 from systemctl reset-failed ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@mgr.vm06.qbbldl
2026-04-15T13:32:42.330 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Failed to reset failed state of unit ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@mgr.vm06.qbbldl.service: Unit ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@mgr.vm06.qbbldl.service not loaded.
2026-04-15T13:32:42.543 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stderr Created symlink /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b.target.wants/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@mgr.vm06.qbbldl.service → /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service.
2026-04-15T13:32:42.550 INFO:teuthology.orchestra.run.vm06.stdout:firewalld does not appear to be present
2026-04-15T13:32:42.550 INFO:teuthology.orchestra.run.vm06.stdout:Not possible to enable service . firewalld.service is not available
2026-04-15T13:32:42.550 INFO:teuthology.orchestra.run.vm06.stdout:firewalld does not appear to be present
2026-04-15T13:32:42.550 INFO:teuthology.orchestra.run.vm06.stdout:Not possible to open ports <[9283, 8765, 8443]>. firewalld.service is not available
2026-04-15T13:32:42.550 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for mgr to start...
2026-04-15T13:32:42.550 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for mgr...
2026-04-15T13:32:42.597 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:42 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
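The "Verifying port 0.0.0.0:9283 ..." lines above show the bootstrap checking that the mgr's ports (9283, 8765 and 8443, which in a default deployment correspond to the prometheus module, cephadm's service-discovery endpoint and the dashboard) are not already taken before the daemon is deployed. A bind test is one plausible way to do such a check; the sketch below is an illustration under that assumption, not cephadm's actual code:

# Check whether a TCP port can still be bound on the given address.
import socket

def port_is_free(addr: str, port: int) -> bool:
    """Return True if addr:port can be bound, i.e. nothing is listening there."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            s.bind((addr, port))
        except OSError:
            return False  # bind failed: something already owns the port
    return True

for p in (9283, 8765, 8443):  # the ports verified in the log above
    print(p, "free" if port_is_free("0.0.0.0", p) else "in use")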
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout {
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsid": "75e42418-38cf-11f1-9300-4fe77ac4445b",
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "health": {
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK",
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "checks": {},
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mutes": []
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout },
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "election_epoch": 5,
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum": [
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 0
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ],
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_names": [
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "vm06"
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ],
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_age": 1,
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "monmap": {
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "tentacle",
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_mons": 1
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout },
2026-04-15T13:32:43.073 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osdmap": {
2026-04-15T13:32:43.074 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-04-15T13:32:43.074 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_osds": 0,
2026-04-15T13:32:43.074 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_up_osds": 0,
2026-04-15T13:32:43.074 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_up_since": 0,
2026-04-15T13:32:43.074 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_in_osds": 0,
2026-04-15T13:32:43.074 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_in_since": 0,
2026-04-15T13:32:43.074 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0
2026-04-15T13:32:43.075 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout },
2026-04-15T13:32:43.075 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgmap": {
2026-04-15T13:32:43.075 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgs_by_state": [],
2026-04-15T13:32:43.075 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_pgs": 0,
2026-04-15T13:32:43.075 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_pools": 0,
2026-04-15T13:32:43.075 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_objects": 0,
2026-04-15T13:32:43.075 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "data_bytes": 0,
2026-04-15T13:32:43.075 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_used": 0,
2026-04-15T13:32:43.075 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_avail": 0,
2026-04-15T13:32:43.075 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_total": 0
2026-04-15T13:32:43.075 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout },
2026-04-15T13:32:43.075 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsmap": {
2026-04-15T13:32:43.075 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-04-15T13:32:43.075 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "btime": "2026-04-15T13:32:40:722083+0000",
2026-04-15T13:32:43.075 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "by_rank": [],
2026-04-15T13:32:43.075 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "up:standby": 0
2026-04-15T13:32:43.075 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout },
2026-04-15T13:32:43.076 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mgrmap": {
2026-04-15T13:32:43.076 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "available": false,
2026-04-15T13:32:43.076 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_standbys": 0,
2026-04-15T13:32:43.076 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modules": [
2026-04-15T13:32:43.076 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "iostat",
2026-04-15T13:32:43.076 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "nfs"
2026-04-15T13:32:43.076 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ],
2026-04-15T13:32:43.076 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {}
2026-04-15T13:32:43.076 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout },
2026-04-15T13:32:43.076 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "servicemap": {
2026-04-15T13:32:43.076 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1,
2026-04-15T13:32:43.076 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modified": "2026-04-15T13:32:40.722866+0000",
2026-04-15T13:32:43.076 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {}
2026-04-15T13:32:43.076 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout },
2026-04-15T13:32:43.076 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "progress_events": {}
2026-04-15T13:32:43.076 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }
2026-04-15T13:32:43.076 INFO:teuthology.orchestra.run.vm06.stdout:mgr not available, waiting (1/15)...
2026-04-15T13:32:43.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:43 vm06 bash[28114]: audit 2026-04-15T13:32:42.047228+0000 mon.vm06 (mon.0) 12 : audit [INF] from='client.? 192.168.123.106:0/2913596629' entity='client.admin'
2026-04-15T13:32:43.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:43 vm06 bash[28114]: audit 2026-04-15T13:32:42.047228+0000 mon.vm06 (mon.0) 12 : audit [INF] from='client.? 192.168.123.106:0/2913596629' entity='client.admin'
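The "mgr not available, waiting (1/15)..." line shows the bootstrap polling `ceph status` (the audit entries record each dispatched status command) and reading `mgrmap.available` from the JSON until the mgr comes up, giving up after 15 attempts. A minimal re-implementation of that wait in Python, assuming the ceph CLI is on PATH; the 15-attempt cap and the field names come from the log above, while the roughly two-second spacing is inferred from the timestamps and the internals of the real tool may differ:

# Poll `ceph status` until the mgr map reports an available active mgr.
import json
import subprocess
import time

def wait_for_mgr(attempts: int = 15, delay: float = 2.0) -> bool:
    """Return True once mgrmap.available is true, False after `attempts` polls."""
    for i in range(1, attempts + 1):
        out = subprocess.run(
            ["ceph", "status", "--format", "json"],
            capture_output=True, text=True, check=True,
        ).stdout
        if json.loads(out).get("mgrmap", {}).get("available", False):
            return True
        print(f"mgr not available, waiting ({i}/{attempts})...")
        time.sleep(delay)
    return False

if __name__ == "__main__":
    raise SystemExit(0 if wait_for_mgr() else 1)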
2026-04-15T13:32:43.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:43 vm06 bash[28114]: audit 2026-04-15T13:32:43.024326+0000 mon.vm06 (mon.0) 13 : audit [DBG] from='client.? 192.168.123.106:0/507094232' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch
2026-04-15T13:32:43.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:43 vm06 bash[28114]: audit 2026-04-15T13:32:43.024326+0000 mon.vm06 (mon.0) 13 : audit [DBG] from='client.? 192.168.123.106:0/507094232' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch
2026-04-15T13:32:45.417 INFO:teuthology.orchestra.run.vm06.stdout:mgr not available, waiting (2/15)...
2026-04-15T13:32:45.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:45 vm06 bash[28114]: audit 2026-04-15T13:32:45.363671+0000 mon.vm06 (mon.0) 14 : audit [DBG] from='client.? 192.168.123.106:0/2495953681' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch
2026-04-15T13:32:45.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:45 vm06 bash[28114]: audit 2026-04-15T13:32:45.363671+0000 mon.vm06 (mon.0) 14 : audit [DBG] from='client.? 192.168.123.106:0/2495953681' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch
2026-04-15T13:32:47.803 INFO:teuthology.orchestra.run.vm06.stdout:mgr not available, waiting (3/15)...
2026-04-15T13:32:48.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:47 vm06 bash[28114]: audit 2026-04-15T13:32:47.755977+0000 mon.vm06 (mon.0) 15 : audit [DBG] from='client.? 192.168.123.106:0/3403727543' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch 2026-04-15T13:32:48.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:47 vm06 bash[28114]: audit 2026-04-15T13:32:47.755977+0000 mon.vm06 (mon.0) 15 : audit [DBG] from='client.? 192.168.123.106:0/3403727543' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout { 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsid": "75e42418-38cf-11f1-9300-4fe77ac4445b", 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "health": { 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 0 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "vm06" 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_age": 8, 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "monmap": { 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "tentacle", 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-04-15T13:32:50.225 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 
"num_remapped_pgs": 0 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "btime": "2026-04-15T13:32:40:722083+0000", 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "available": false, 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modules": [ 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "iostat", 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "nfs" 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {} 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modified": "2026-04-15T13:32:40.722866+0000", 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {} 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout } 2026-04-15T13:32:50.227 INFO:teuthology.orchestra.run.vm06.stdout:mgr not available, waiting (4/15)... 
2026-04-15T13:32:50.468 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:50 vm06 bash[28114]: audit 2026-04-15T13:32:50.180537+0000 mon.vm06 (mon.0) 16 : audit [DBG] from='client.? 192.168.123.106:0/333423810' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch 2026-04-15T13:32:50.469 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:50 vm06 bash[28114]: audit 2026-04-15T13:32:50.180537+0000 mon.vm06 (mon.0) 16 : audit [DBG] from='client.? 192.168.123.106:0/333423810' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout { 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsid": "75e42418-38cf-11f1-9300-4fe77ac4445b", 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "health": { 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 0 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "vm06" 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_age": 10, 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "monmap": { 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "tentacle", 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-04-15T13:32:52.651 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-04-15T13:32:52.652 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-04-15T13:32:52.652 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-04-15T13:32:52.652 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 
"num_remapped_pgs": 0 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "btime": "2026-04-15T13:32:40:722083+0000", 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "available": false, 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modules": [ 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "iostat", 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "nfs" 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {} 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modified": "2026-04-15T13:32:40.722866+0000", 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {} 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-04-15T13:32:52.653 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout } 2026-04-15T13:32:52.654 INFO:teuthology.orchestra.run.vm06.stdout:mgr not available, waiting (5/15)... 
2026-04-15T13:32:53.011 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:52 vm06 bash[28114]: audit 2026-04-15T13:32:52.578826+0000 mon.vm06 (mon.0) 17 : audit [DBG] from='client.? 192.168.123.106:0/3721159533' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch
2026-04-15T13:32:54.681 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:54 vm06 bash[28114]: cluster 2026-04-15T13:32:54.347557+0000 mon.vm06 (mon.0) 18 : cluster [INF] Activating manager daemon vm06.qbbldl
2026-04-15T13:32:54.681 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:54 vm06 bash[28114]: cluster 2026-04-15T13:32:54.353872+0000 mon.vm06 (mon.0) 19 : cluster [DBG] mgrmap e2: vm06.qbbldl(active, starting, since 0.00644531s)
2026-04-15T13:32:54.681 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:54 vm06 bash[28114]: audit 2026-04-15T13:32:54.355376+0000 mon.vm06 (mon.0) 20 : audit [DBG] from='mgr.14100 192.168.123.106:0/2591782701' entity='mgr.vm06.qbbldl' cmd={"prefix": "mds metadata"} : dispatch
2026-04-15T13:32:54.681 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:54 vm06 bash[28114]: audit 2026-04-15T13:32:54.355475+0000 mon.vm06 (mon.0) 21 : audit [DBG] from='mgr.14100 192.168.123.106:0/2591782701' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata"} : dispatch
2026-04-15T13:32:54.681 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:54 vm06 bash[28114]: audit 2026-04-15T13:32:54.355550+0000 mon.vm06 (mon.0) 22 : audit [DBG] from='mgr.14100 192.168.123.106:0/2591782701' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata"} : dispatch
2026-04-15T13:32:54.681 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:54 vm06 bash[28114]: audit 2026-04-15T13:32:54.356756+0000 mon.vm06 (mon.0) 23 : audit [DBG] from='mgr.14100 192.168.123.106:0/2591782701' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm06"} : dispatch
2026-04-15T13:32:54.681 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:54 vm06 bash[28114]: audit 2026-04-15T13:32:54.356844+0000 mon.vm06 (mon.0) 24 : audit [DBG] from='mgr.14100 192.168.123.106:0/2591782701' entity='mgr.vm06.qbbldl' cmd={"prefix": "mgr metadata", "who": "vm06.qbbldl", "id": "vm06.qbbldl"} : dispatch
2026-04-15T13:32:54.681 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:54 vm06 bash[28114]: cluster 2026-04-15T13:32:54.362875+0000 mon.vm06 (mon.0) 25 : cluster [INF] Manager daemon vm06.qbbldl is now available
2026-04-15T13:32:54.681 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:54 vm06 bash[28114]: audit 2026-04-15T13:32:54.375753+0000 mon.vm06 (mon.0) 26 : audit [INF] from='mgr.14100 192.168.123.106:0/2591782701' entity='mgr.vm06.qbbldl' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.qbbldl/mirror_snapshot_schedule"} : dispatch
2026-04-15T13:32:54.681 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:54 vm06 bash[28114]: audit 2026-04-15T13:32:54.378720+0000 mon.vm06 (mon.0) 27 : audit [INF] from='mgr.14100 192.168.123.106:0/2591782701' entity='mgr.vm06.qbbldl'
2026-04-15T13:32:54.681 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:54 vm06 bash[28114]: audit 2026-04-15T13:32:54.379216+0000 mon.vm06 (mon.0) 28 : audit [INF] from='mgr.14100 192.168.123.106:0/2591782701' entity='mgr.vm06.qbbldl' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.qbbldl/trash_purge_schedule"} : dispatch
2026-04-15T13:32:54.681 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:54 vm06 bash[28114]: audit 2026-04-15T13:32:54.381764+0000 mon.vm06 (mon.0) 29 : audit [INF] from='mgr.14100 192.168.123.106:0/2591782701' entity='mgr.vm06.qbbldl' 2026-04-15T13:32:54.681
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:54 vm06 bash[28114]: audit 2026-04-15T13:32:54.381764+0000 mon.vm06 (mon.0) 29 : audit [INF] from='mgr.14100 192.168.123.106:0/2591782701' entity='mgr.vm06.qbbldl' 2026-04-15T13:32:54.681 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:54 vm06 bash[28114]: audit 2026-04-15T13:32:54.383877+0000 mon.vm06 (mon.0) 30 : audit [INF] from='mgr.14100 192.168.123.106:0/2591782701' entity='mgr.vm06.qbbldl' 2026-04-15T13:32:54.681 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:54 vm06 bash[28114]: audit 2026-04-15T13:32:54.383877+0000 mon.vm06 (mon.0) 30 : audit [INF] from='mgr.14100 192.168.123.106:0/2591782701' entity='mgr.vm06.qbbldl' 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout { 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsid": "75e42418-38cf-11f1-9300-4fe77ac4445b", 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "health": { 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 0 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "vm06" 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_age": 13, 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "monmap": { 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "tentacle", 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_osds": 0, 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-04-15T13:32:55.059 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-04-15T13:32:55.060 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-04-15T13:32:55.060 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_in_since": 
0, 2026-04-15T13:32:55.060 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-04-15T13:32:55.060 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:55.060 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-04-15T13:32:55.060 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-04-15T13:32:55.060 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-04-15T13:32:55.060 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-04-15T13:32:55.060 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-04-15T13:32:55.060 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-04-15T13:32:55.060 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-04-15T13:32:55.060 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-04-15T13:32:55.060 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-04-15T13:32:55.060 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:55.060 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-04-15T13:32:55.060 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-15T13:32:55.061 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "btime": "2026-04-15T13:32:40:722083+0000", 2026-04-15T13:32:55.061 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-04-15T13:32:55.061 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-04-15T13:32:55.061 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:55.061 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-04-15T13:32:55.061 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "available": false, 2026-04-15T13:32:55.061 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-04-15T13:32:55.061 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modules": [ 2026-04-15T13:32:55.061 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "iostat", 2026-04-15T13:32:55.061 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "nfs" 2026-04-15T13:32:55.061 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-04-15T13:32:55.061 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {} 2026-04-15T13:32:55.061 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:55.061 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-04-15T13:32:55.061 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-15T13:32:55.061 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modified": "2026-04-15T13:32:40.722866+0000", 2026-04-15T13:32:55.061 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {} 2026-04-15T13:32:55.061 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:55.061 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-04-15T13:32:55.061 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout } 2026-04-15T13:32:55.061 
INFO:teuthology.orchestra.run.vm06.stdout:mgr not available, waiting (6/15)... 2026-04-15T13:32:55.679 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:55 vm06 bash[28114]: audit 2026-04-15T13:32:54.991264+0000 mon.vm06 (mon.0) 31 : audit [DBG] from='client.? 192.168.123.106:0/1891164613' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch 2026-04-15T13:32:55.680 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:55 vm06 bash[28114]: audit 2026-04-15T13:32:54.991264+0000 mon.vm06 (mon.0) 31 : audit [DBG] from='client.? 192.168.123.106:0/1891164613' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch 2026-04-15T13:32:55.680 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:55 vm06 bash[28114]: cluster 2026-04-15T13:32:55.357704+0000 mon.vm06 (mon.0) 32 : cluster [DBG] mgrmap e3: vm06.qbbldl(active, since 1.01028s) 2026-04-15T13:32:55.680 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:55 vm06 bash[28114]: cluster 2026-04-15T13:32:55.357704+0000 mon.vm06 (mon.0) 32 : cluster [DBG] mgrmap e3: vm06.qbbldl(active, since 1.01028s) 2026-04-15T13:32:57.531 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-04-15T13:32:57.531 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout { 2026-04-15T13:32:57.531 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsid": "75e42418-38cf-11f1-9300-4fe77ac4445b", 2026-04-15T13:32:57.531 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "health": { 2026-04-15T13:32:57.531 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "status": "HEALTH_OK", 2026-04-15T13:32:57.531 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "checks": {}, 2026-04-15T13:32:57.531 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mutes": [] 2026-04-15T13:32:57.531 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "election_epoch": 5, 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum": [ 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 0 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_names": [ 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "vm06" 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "quorum_age": 15, 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "monmap": { 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "min_mon_release_name": "tentacle", 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_mons": 1 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osdmap": { 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_osds": 0, 
2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_up_osds": 0, 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_up_since": 0, 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_in_osds": 0, 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "osd_in_since": 0, 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_remapped_pgs": 0 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgmap": { 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "pgs_by_state": [], 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_pgs": 0, 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_pools": 0, 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_objects": 0, 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "data_bytes": 0, 2026-04-15T13:32:57.532 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_used": 0, 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_avail": 0, 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "bytes_total": 0 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "fsmap": { 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "btime": "2026-04-15T13:32:40:722083+0000", 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "by_rank": [], 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "up:standby": 0 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mgrmap": { 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "available": true, 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_standbys": 0, 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modules": [ 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "iostat", 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "nfs" 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ], 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {} 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "servicemap": { 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 1, 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "modified": "2026-04-15T13:32:40.722866+0000", 2026-04-15T13:32:57.533 
INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "services": {} 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }, 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "progress_events": {} 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout } 2026-04-15T13:32:57.533 INFO:teuthology.orchestra.run.vm06.stdout:mgr is available 2026-04-15T13:32:57.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:57 vm06 bash[28114]: cluster 2026-04-15T13:32:56.407825+0000 mon.vm06 (mon.0) 33 : cluster [DBG] mgrmap e4: vm06.qbbldl(active, since 2s) 2026-04-15T13:32:57.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:57 vm06 bash[28114]: cluster 2026-04-15T13:32:56.407825+0000 mon.vm06 (mon.0) 33 : cluster [DBG] mgrmap e4: vm06.qbbldl(active, since 2s) 2026-04-15T13:32:57.946 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-04-15T13:32:57.946 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout [global] 2026-04-15T13:32:57.947 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout fsid = 75e42418-38cf-11f1-9300-4fe77ac4445b 2026-04-15T13:32:57.947 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_cluster_log_file_level = debug 2026-04-15T13:32:57.947 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_host = [v2:192.168.123.106:3300,v1:192.168.123.106:6789] 2026-04-15T13:32:57.947 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_osd_allow_pg_remap = true 2026-04-15T13:32:57.947 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_osd_allow_primary_affinity = true 2026-04-15T13:32:57.947 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mon_warn_on_no_sortbitwise = false 2026-04-15T13:32:57.947 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout osd_crush_chooseleaf_type = 0 2026-04-15T13:32:57.947 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-04-15T13:32:57.947 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout [mgr] 2026-04-15T13:32:57.947 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout mgr/telemetry/nag = false 2026-04-15T13:32:57.947 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 2026-04-15T13:32:57.947 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout [osd] 2026-04-15T13:32:57.947 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout osd_map_max_advance = 10 2026-04-15T13:32:57.947 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout osd_sloppy_crc = true 2026-04-15T13:32:57.947 INFO:teuthology.orchestra.run.vm06.stdout:Enabling cephadm module... 2026-04-15T13:32:58.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:58 vm06 bash[28114]: audit 2026-04-15T13:32:57.488357+0000 mon.vm06 (mon.0) 34 : audit [DBG] from='client.? 192.168.123.106:0/2334917931' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch 2026-04-15T13:32:58.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:58 vm06 bash[28114]: audit 2026-04-15T13:32:57.488357+0000 mon.vm06 (mon.0) 34 : audit [DBG] from='client.? 192.168.123.106:0/2334917931' entity='client.admin' cmd={"prefix": "status", "format": "json-pretty"} : dispatch 2026-04-15T13:32:58.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:58 vm06 bash[28114]: audit 2026-04-15T13:32:57.889692+0000 mon.vm06 (mon.0) 35 : audit [INF] from='client.? 
192.168.123.106:0/2975424121' entity='client.admin' cmd={"prefix": "config assimilate-conf"} : dispatch 2026-04-15T13:32:58.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:58 vm06 bash[28114]: audit 2026-04-15T13:32:57.889692+0000 mon.vm06 (mon.0) 35 : audit [INF] from='client.? 192.168.123.106:0/2975424121' entity='client.admin' cmd={"prefix": "config assimilate-conf"} : dispatch 2026-04-15T13:32:58.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:58 vm06 bash[28114]: audit 2026-04-15T13:32:58.324056+0000 mon.vm06 (mon.0) 36 : audit [INF] from='client.? 192.168.123.106:0/4126174770' entity='client.admin' cmd={"prefix": "mgr module enable", "module": "cephadm"} : dispatch 2026-04-15T13:32:58.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:58 vm06 bash[28114]: audit 2026-04-15T13:32:58.324056+0000 mon.vm06 (mon.0) 36 : audit [INF] from='client.? 192.168.123.106:0/4126174770' entity='client.admin' cmd={"prefix": "mgr module enable", "module": "cephadm"} : dispatch 2026-04-15T13:32:58.937 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout { 2026-04-15T13:32:58.937 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "epoch": 5, 2026-04-15T13:32:58.937 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "available": true, 2026-04-15T13:32:58.937 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "active_name": "vm06.qbbldl", 2026-04-15T13:32:58.937 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "num_standby": 0 2026-04-15T13:32:58.937 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout } 2026-04-15T13:32:58.937 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for the mgr to restart... 2026-04-15T13:32:58.937 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for mgr epoch 5... 2026-04-15T13:32:59.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:59 vm06 bash[28114]: audit 2026-04-15T13:32:58.413915+0000 mon.vm06 (mon.0) 37 : audit [INF] from='client.? 192.168.123.106:0/4126174770' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished 2026-04-15T13:32:59.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:59 vm06 bash[28114]: audit 2026-04-15T13:32:58.413915+0000 mon.vm06 (mon.0) 37 : audit [INF] from='client.? 192.168.123.106:0/4126174770' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "cephadm"}]': finished 2026-04-15T13:32:59.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:59 vm06 bash[28114]: cluster 2026-04-15T13:32:58.416443+0000 mon.vm06 (mon.0) 38 : cluster [DBG] mgrmap e5: vm06.qbbldl(active, since 4s) 2026-04-15T13:32:59.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:59 vm06 bash[28114]: cluster 2026-04-15T13:32:58.416443+0000 mon.vm06 (mon.0) 38 : cluster [DBG] mgrmap e5: vm06.qbbldl(active, since 4s) 2026-04-15T13:32:59.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:59 vm06 bash[28114]: audit 2026-04-15T13:32:58.889898+0000 mon.vm06 (mon.0) 39 : audit [DBG] from='client.? 192.168.123.106:0/4050440591' entity='client.admin' cmd={"prefix": "mgr stat"} : dispatch 2026-04-15T13:32:59.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:32:59 vm06 bash[28114]: audit 2026-04-15T13:32:58.889898+0000 mon.vm06 (mon.0) 39 : audit [DBG] from='client.? 
192.168.123.106:0/4050440591' entity='client.admin' cmd={"prefix": "mgr stat"} : dispatch 2026-04-15T13:33:09.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:08 vm06 bash[28114]: cluster 2026-04-15T13:33:08.499658+0000 mon.vm06 (mon.0) 40 : cluster [INF] Active manager daemon vm06.qbbldl restarted 2026-04-15T13:33:09.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:08 vm06 bash[28114]: cluster 2026-04-15T13:33:08.499658+0000 mon.vm06 (mon.0) 40 : cluster [INF] Active manager daemon vm06.qbbldl restarted 2026-04-15T13:33:09.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:08 vm06 bash[28114]: cluster 2026-04-15T13:33:08.499891+0000 mon.vm06 (mon.0) 41 : cluster [INF] Activating manager daemon vm06.qbbldl 2026-04-15T13:33:09.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:08 vm06 bash[28114]: cluster 2026-04-15T13:33:08.499891+0000 mon.vm06 (mon.0) 41 : cluster [INF] Activating manager daemon vm06.qbbldl 2026-04-15T13:33:09.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:08 vm06 bash[28114]: cluster 2026-04-15T13:33:08.504467+0000 mon.vm06 (mon.0) 42 : cluster [DBG] osdmap e2: 0 total, 0 up, 0 in 2026-04-15T13:33:09.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:08 vm06 bash[28114]: cluster 2026-04-15T13:33:08.504467+0000 mon.vm06 (mon.0) 42 : cluster [DBG] osdmap e2: 0 total, 0 up, 0 in 2026-04-15T13:33:09.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:08 vm06 bash[28114]: cluster 2026-04-15T13:33:08.504529+0000 mon.vm06 (mon.0) 43 : cluster [DBG] mgrmap e6: vm06.qbbldl(active, starting, since 0.00472851s) 2026-04-15T13:33:09.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:08 vm06 bash[28114]: cluster 2026-04-15T13:33:08.504529+0000 mon.vm06 (mon.0) 43 : cluster [DBG] mgrmap e6: vm06.qbbldl(active, starting, since 0.00472851s) 2026-04-15T13:33:09.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:08 vm06 bash[28114]: audit 2026-04-15T13:33:08.506553+0000 mon.vm06 (mon.0) 44 : audit [DBG] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm06"} : dispatch 2026-04-15T13:33:09.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:08 vm06 bash[28114]: audit 2026-04-15T13:33:08.506553+0000 mon.vm06 (mon.0) 44 : audit [DBG] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm06"} : dispatch 2026-04-15T13:33:09.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:08 vm06 bash[28114]: audit 2026-04-15T13:33:08.506663+0000 mon.vm06 (mon.0) 45 : audit [DBG] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix": "mgr metadata", "who": "vm06.qbbldl", "id": "vm06.qbbldl"} : dispatch 2026-04-15T13:33:09.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:08 vm06 bash[28114]: audit 2026-04-15T13:33:08.506663+0000 mon.vm06 (mon.0) 45 : audit [DBG] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix": "mgr metadata", "who": "vm06.qbbldl", "id": "vm06.qbbldl"} : dispatch 2026-04-15T13:33:09.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:08 vm06 bash[28114]: audit 2026-04-15T13:33:08.507549+0000 mon.vm06 (mon.0) 46 : audit [DBG] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix": "mds metadata"} : dispatch 2026-04-15T13:33:09.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:08 vm06 bash[28114]: audit 2026-04-15T13:33:08.507549+0000 mon.vm06 (mon.0) 46 : audit [DBG] 
from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix": "mds metadata"} : dispatch 2026-04-15T13:33:09.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:08 vm06 bash[28114]: audit 2026-04-15T13:33:08.507711+0000 mon.vm06 (mon.0) 47 : audit [DBG] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata"} : dispatch 2026-04-15T13:33:09.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:08 vm06 bash[28114]: audit 2026-04-15T13:33:08.507711+0000 mon.vm06 (mon.0) 47 : audit [DBG] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata"} : dispatch 2026-04-15T13:33:09.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:08 vm06 bash[28114]: audit 2026-04-15T13:33:08.507805+0000 mon.vm06 (mon.0) 48 : audit [DBG] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata"} : dispatch 2026-04-15T13:33:09.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:08 vm06 bash[28114]: audit 2026-04-15T13:33:08.507805+0000 mon.vm06 (mon.0) 48 : audit [DBG] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata"} : dispatch 2026-04-15T13:33:09.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:08 vm06 bash[28114]: cluster 2026-04-15T13:33:08.512855+0000 mon.vm06 (mon.0) 49 : cluster [INF] Manager daemon vm06.qbbldl is now available 2026-04-15T13:33:09.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:08 vm06 bash[28114]: cluster 2026-04-15T13:33:08.512855+0000 mon.vm06 (mon.0) 49 : cluster [INF] Manager daemon vm06.qbbldl is now available 2026-04-15T13:33:09.571 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout { 2026-04-15T13:33:09.571 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "mgrmap_epoch": 7, 2026-04-15T13:33:09.571 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout "initialized": true 2026-04-15T13:33:09.571 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout } 2026-04-15T13:33:09.571 INFO:teuthology.orchestra.run.vm06.stdout:mgr epoch 5 is available 2026-04-15T13:33:09.571 INFO:teuthology.orchestra.run.vm06.stdout:Verifying orchestrator module is enabled... 2026-04-15T13:33:10.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:10 vm06 bash[28114]: audit 2026-04-15T13:33:09.160344+0000 mon.vm06 (mon.0) 50 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' 2026-04-15T13:33:10.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:10 vm06 bash[28114]: audit 2026-04-15T13:33:09.160344+0000 mon.vm06 (mon.0) 50 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' 2026-04-15T13:33:10.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:10 vm06 bash[28114]: audit 2026-04-15T13:33:09.163433+0000 mon.vm06 (mon.0) 51 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' 2026-04-15T13:33:10.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:10 vm06 bash[28114]: audit 2026-04-15T13:33:09.163433+0000 mon.vm06 (mon.0) 51 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' 2026-04-15T13:33:10.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:10 vm06 bash[28114]: cephadm 2026-04-15T13:33:09.163903+0000 mgr.vm06.qbbldl (mgr.14126) 1 : cephadm [INF] Found migration_current of "None". Setting to last migration. 
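At this point the bootstrap has assimilated its minimal config and enabled the cephadm mgr module, which forces the active mgr to restart with the module loaded. Reconstructed from the audit entries above, the equivalent CLI sequence is roughly the following sketch (the input filename is hypothetical):

    # Feed the bootstrap ceph.conf into the mon config store, then enable cephadm.
    ceph config assimilate-conf -i initial-ceph.conf   # audit: "config assimilate-conf"
    ceph mgr module enable cephadm                     # audit: "mgr module enable cephadm"
    ceph mgr stat                                      # polled until the mgr restart completes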
2026-04-15T13:33:10.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:10 vm06 bash[28114]: cephadm 2026-04-15T13:33:09.163903+0000 mgr.vm06.qbbldl (mgr.14126) 1 : cephadm [INF] Found migration_current of "None". Setting to last migration. 2026-04-15T13:33:10.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:10 vm06 bash[28114]: audit 2026-04-15T13:33:09.165623+0000 mon.vm06 (mon.0) 52 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' 2026-04-15T13:33:10.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:10 vm06 bash[28114]: audit 2026-04-15T13:33:09.165623+0000 mon.vm06 (mon.0) 52 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' 2026-04-15T13:33:10.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:10 vm06 bash[28114]: audit 2026-04-15T13:33:09.222930+0000 mon.vm06 (mon.0) 53 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' 2026-04-15T13:33:10.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:10 vm06 bash[28114]: audit 2026-04-15T13:33:09.222930+0000 mon.vm06 (mon.0) 53 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' 2026-04-15T13:33:10.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:10 vm06 bash[28114]: audit 2026-04-15T13:33:09.225362+0000 mon.vm06 (mon.0) 54 : audit [DBG] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:33:10.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:10 vm06 bash[28114]: audit 2026-04-15T13:33:09.225362+0000 mon.vm06 (mon.0) 54 : audit [DBG] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:33:10.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:10 vm06 bash[28114]: audit 2026-04-15T13:33:09.228374+0000 mon.vm06 (mon.0) 55 : audit [DBG] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:33:10.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:10 vm06 bash[28114]: audit 2026-04-15T13:33:09.228374+0000 mon.vm06 (mon.0) 55 : audit [DBG] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:33:10.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:10 vm06 bash[28114]: audit 2026-04-15T13:33:09.235929+0000 mon.vm06 (mon.0) 56 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.qbbldl/mirror_snapshot_schedule"} : dispatch 2026-04-15T13:33:10.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:10 vm06 bash[28114]: audit 2026-04-15T13:33:09.235929+0000 mon.vm06 (mon.0) 56 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.qbbldl/mirror_snapshot_schedule"} : dispatch 2026-04-15T13:33:10.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:10 vm06 bash[28114]: audit 2026-04-15T13:33:09.240373+0000 mon.vm06 (mon.0) 57 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.qbbldl/trash_purge_schedule"} : dispatch 2026-04-15T13:33:10.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 
13:33:10 vm06 bash[28114]: audit 2026-04-15T13:33:09.240373+0000 mon.vm06 (mon.0) 57 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.qbbldl/trash_purge_schedule"} : dispatch
2026-04-15T13:33:10.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:10 vm06 bash[28114]: cluster 2026-04-15T13:33:09.508203+0000 mon.vm06 (mon.0) 58 : cluster [DBG] mgrmap e7: vm06.qbbldl(active, since 1.0084s)
2026-04-15T13:33:10.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:10 vm06 bash[28114]: audit 2026-04-15T13:33:09.997547+0000 mon.vm06 (mon.0) 59 : audit [INF] from='client.? 192.168.123.106:0/2076948171' entity='client.admin' cmd={"prefix": "mgr module enable", "module": "orchestrator"} : dispatch
2026-04-15T13:33:10.556 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stderr module 'orchestrator' is already enabled (always-on)
2026-04-15T13:33:10.556 INFO:teuthology.orchestra.run.vm06.stdout:Setting orchestrator backend to cephadm...
2026-04-15T13:33:11.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:11 vm06 bash[28114]: audit 2026-04-15T13:33:09.508301+0000 mgr.vm06.qbbldl (mgr.14126) 2 : audit [DBG] from='client.14130 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-04-15T13:33:11.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:11 vm06 bash[28114]: audit 2026-04-15T13:33:09.512397+0000 mgr.vm06.qbbldl (mgr.14126) 3 : audit [DBG] from='client.14130 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-04-15T13:33:11.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:11 vm06 bash[28114]: cephadm 2026-04-15T13:33:10.042172+0000 mgr.vm06.qbbldl (mgr.14126) 4 : cephadm [INF] [15/Apr/2026:13:33:10] ENGINE Bus STARTING
2026-04-15T13:33:11.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:11 vm06 bash[28114]: cephadm 2026-04-15T13:33:10.144540+0000 mgr.vm06.qbbldl (mgr.14126) 5 : cephadm [INF] [15/Apr/2026:13:33:10] ENGINE Serving on http://192.168.123.106:8765
2026-04-15T13:33:11.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:11 vm06 bash[28114]: cephadm 2026-04-15T13:33:10.255873+0000 mgr.vm06.qbbldl (mgr.14126) 6 : cephadm [INF] [15/Apr/2026:13:33:10] ENGINE Serving on https://192.168.123.106:7150
2026-04-15T13:33:11.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:11 vm06 bash[28114]: cephadm 2026-04-15T13:33:10.255964+0000 mgr.vm06.qbbldl (mgr.14126) 7 : cephadm [INF] [15/Apr/2026:13:33:10] ENGINE Bus STARTED
2026-04-15T13:33:11.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:11 vm06 bash[28114]: audit 2026-04-15T13:33:10.256438+0000 mon.vm06 (mon.0) 60 : audit [DBG] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:33:11.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:11 vm06 bash[28114]: cephadm 2026-04-15T13:33:10.256444+0000 mgr.vm06.qbbldl (mgr.14126) 8 : cephadm [INF] [15/Apr/2026:13:33:10] ENGINE Client ('192.168.123.106', 57378) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-04-15T13:33:11.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:11 vm06 bash[28114]: audit 2026-04-15T13:33:10.508496+0000 mon.vm06 (mon.0) 61 : audit [INF] from='client.? 192.168.123.106:0/2076948171' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "orchestrator"}]': finished
2026-04-15T13:33:11.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:11 vm06 bash[28114]: cluster 2026-04-15T13:33:10.510652+0000 mon.vm06 (mon.0) 62 : cluster [DBG] mgrmap e8: vm06.qbbldl(active, since 2s)
2026-04-15T13:33:11.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:11 vm06 bash[28114]: audit 2026-04-15T13:33:10.956570+0000 mon.vm06 (mon.0) 63 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:11.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:11 vm06 bash[28114]: audit 2026-04-15T13:33:10.962702+0000 mon.vm06 (mon.0) 64 : audit [DBG] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:33:11.435 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout value unchanged
2026-04-15T13:33:11.435 INFO:teuthology.orchestra.run.vm06.stdout:Generating ssh key...
2026-04-15T13:33:12.378 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC3ypOP4OBBTc+AhOn0J37+8rZEduYkWaoFvuWl6QyqZ3WwuzTvfd2nW0/dTrvxZE7HmOVrG6j5CpjbsO8gaOWdq8L1oanxLoIIEUJEsvCYMmCqi3CIP/CsJRAm8NbP3vqZS+vcginTQnl15yuCEw3mQN6d8SIpCCWcORRnMQjH5R/seODsUynsW+CVk29vPV3BMm4b8P6JiDKaubj1K+HLDfueYDSkGwsGfM+68U0ekFJJoiEBb66L/X8ne8seEYFzqRgv3pd4ijRdbF3o/u4WxTTAeLksBPxnncfFQDgQOggXNwhDYATYp55k88B4Bsbhn4PfqOYBoavzQSthbsR1BZ9Xp5Wy4dLv+qxVOpSThb82HWrt3hG0sXdurfTb6BREV6DQKAyvarKPz5LkOXA8SQRXtUMhQSBDF2thl2+bTQn2kF76Ky3LJKCLfk6xxsFdzabDrqieyxtAxorHKCAokjLjfTu8OgUtFQvr5yfc/PUkRmlHs6OeaLYneNdIpCM= ceph-75e42418-38cf-11f1-9300-4fe77ac4445b
2026-04-15T13:33:12.378 INFO:teuthology.orchestra.run.vm06.stdout:Wrote public SSH key to /home/ubuntu/cephtest/ceph.pub
2026-04-15T13:33:12.378 INFO:teuthology.orchestra.run.vm06.stdout:Adding key to root@localhost authorized_keys...
2026-04-15T13:33:12.378 INFO:teuthology.orchestra.run.vm06.stdout:Adding host vm06...
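The audit entries above and below trace cephadm's standard SSH bootstrap: the mgr generates a cluster key pair ("cephadm generate-key"), exposes the public half ("cephadm get-pub-key"), the key is authorized for root on the target, and the first host is registered. A minimal shell sketch of the same sequence, using the host name, address and paths from this run (it assumes a working ceph CLI against this cluster):

    # regenerate the cluster-wide SSH key pair and fetch the public half
    ceph cephadm generate-key
    ceph cephadm get-pub-key > /home/ubuntu/cephtest/ceph.pub
    # authorize the key for root on the node to be managed
    sudo install -d -m 0700 /root/.ssh
    sudo tee -a /root/.ssh/authorized_keys < /home/ubuntu/cephtest/ceph.pub
    sudo chmod 0600 /root/.ssh/authorized_keys
    # register the host with the orchestrator
    ceph orch host add vm06 192.168.123.106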
2026-04-15T13:33:12.422 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:12 vm06 bash[28114]: audit 2026-04-15T13:33:10.952735+0000 mgr.vm06.qbbldl (mgr.14126) 9 : audit [DBG] from='client.14140 -' entity='client.admin' cmd=[{"prefix": "orch set backend", "module_name": "cephadm", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:33:12.422 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:12 vm06 bash[28114]: audit 2026-04-15T13:33:11.391559+0000 mgr.vm06.qbbldl (mgr.14126) 10 : audit [DBG] from='client.14142 -' entity='client.admin' cmd=[{"prefix": "cephadm set-user", "user": "root", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:33:12.422 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:12 vm06 bash[28114]: audit 2026-04-15T13:33:11.908716+0000 mon.vm06 (mon.0) 65 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:12.422 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:12 vm06 bash[28114]: audit 2026-04-15T13:33:11.910709+0000 mon.vm06 (mon.0) 66 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:13.166 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:13 vm06 bash[28114]: audit 2026-04-15T13:33:11.791726+0000 mgr.vm06.qbbldl (mgr.14126) 11 : audit [DBG] from='client.14144 -' entity='client.admin' cmd=[{"prefix": "cephadm generate-key", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:33:13.166 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:13 vm06 bash[28114]: cephadm 2026-04-15T13:33:11.791937+0000 mgr.vm06.qbbldl (mgr.14126) 12 : cephadm [INF] Generating ssh key...
2026-04-15T13:33:13.166 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:13 vm06 bash[28114]: audit 2026-04-15T13:33:12.334027+0000 mgr.vm06.qbbldl (mgr.14126) 13 : audit [DBG] from='client.14146 -' entity='client.admin' cmd=[{"prefix": "cephadm get-pub-key", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:33:14.204 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:14 vm06 bash[28114]: audit 2026-04-15T13:33:12.724195+0000 mgr.vm06.qbbldl (mgr.14126) 14 : audit [DBG] from='client.14148 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm06", "addr": "192.168.123.106", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:33:15.261 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:15 vm06 bash[28114]: cephadm 2026-04-15T13:33:13.645639+0000 mgr.vm06.qbbldl (mgr.14126) 15 : cephadm [INF] Deploying cephadm binary to vm06
2026-04-15T13:33:15.659 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Added host 'vm06' with addr '192.168.123.106'
2026-04-15T13:33:15.659 INFO:teuthology.orchestra.run.vm06.stdout:Deploying mon service with default placement...
2026-04-15T13:33:16.076 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled mon update...
2026-04-15T13:33:16.076 INFO:teuthology.orchestra.run.vm06.stdout:Deploying mgr service with default placement...
2026-04-15T13:33:16.490 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled mgr update...
2026-04-15T13:33:16.490 INFO:teuthology.orchestra.run.vm06.stdout:Deploying crash service with default placement...
2026-04-15T13:33:16.723 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:16 vm06 bash[28114]: audit 2026-04-15T13:33:15.613065+0000 mon.vm06 (mon.0) 67 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:16.723 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:16 vm06 bash[28114]: cephadm 2026-04-15T13:33:15.613332+0000 mgr.vm06.qbbldl (mgr.14126) 16 : cephadm [INF] Added host vm06
2026-04-15T13:33:16.723 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:16 vm06 bash[28114]: audit 2026-04-15T13:33:15.616533+0000 mon.vm06 (mon.0) 68 : audit [DBG] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:33:16.724 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:16 vm06 bash[28114]: audit 2026-04-15T13:33:16.032385+0000 mgr.vm06.qbbldl (mgr.14126) 17 : audit [DBG] from='client.14150 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:33:16.724 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:16 vm06 bash[28114]: cephadm 2026-04-15T13:33:16.033392+0000 mgr.vm06.qbbldl (mgr.14126) 18 : cephadm [INF] Saving service mon spec with placement count:5
2026-04-15T13:33:16.724 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:16 vm06 bash[28114]: audit 2026-04-15T13:33:16.036530+0000 mon.vm06 (mon.0) 69 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:16.724 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:16 vm06 bash[28114]: audit 2026-04-15T13:33:16.440501+0000 mgr.vm06.qbbldl (mgr.14126) 19 : audit [DBG] from='client.14152 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mgr", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:33:16.724 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:16 vm06 bash[28114]: cephadm 2026-04-15T13:33:16.441427+0000 mgr.vm06.qbbldl (mgr.14126) 20 : cephadm [INF] Saving service mgr spec with placement count:2
2026-04-15T13:33:16.724 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:16 vm06 bash[28114]: audit 2026-04-15T13:33:16.444991+0000 mon.vm06 (mon.0) 70 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:17.014 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled crash update...
2026-04-15T13:33:17.014 INFO:teuthology.orchestra.run.vm06.stdout:Deploying ceph-exporter service with default placement...
2026-04-15T13:33:17.466 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled ceph-exporter update...
2026-04-15T13:33:17.466 INFO:teuthology.orchestra.run.vm06.stdout:Deploying prometheus service with default placement...
2026-04-15T13:33:17.954 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled prometheus update...
2026-04-15T13:33:17.954 INFO:teuthology.orchestra.run.vm06.stdout:Deploying grafana service with default placement...
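Each "Deploying ... service with default placement" line is an "orch apply" call; the mgr records a service spec with its default placement (mon count:5, mgr count:2, crash on every host, as the "Saving service ... spec" entries show). The same defaults can be written out explicitly as a spec file; a sketch, assuming it is fed to ceph orch apply -i (the file name is illustrative):

    # mon-mgr-crash.yaml -- explicit form of the default specs saved above
    service_type: mon
    placement:
      count: 5
    ---
    service_type: mgr
    placement:
      count: 2
    ---
    service_type: crash
    placement:
      host_pattern: '*'

Applied with: ceph orch apply -i mon-mgr-crash.yaml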
2026-04-15T13:33:18.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:17 vm06 bash[28114]: audit 2026-04-15T13:33:16.941697+0000 mgr.vm06.qbbldl (mgr.14126) 21 : audit [DBG] from='client.14154 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "crash", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:33:18.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:17 vm06 bash[28114]: cephadm 2026-04-15T13:33:16.942522+0000 mgr.vm06.qbbldl (mgr.14126) 22 : cephadm [INF] Saving service crash spec with placement *
2026-04-15T13:33:18.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:17 vm06 bash[28114]: audit 2026-04-15T13:33:16.945280+0000 mon.vm06 (mon.0) 71 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:18.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:17 vm06 bash[28114]: audit 2026-04-15T13:33:17.112871+0000 mon.vm06 (mon.0) 72 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:18.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:17 vm06 bash[28114]: audit 2026-04-15T13:33:17.410475+0000 mgr.vm06.qbbldl (mgr.14126) 23 : audit [DBG] from='client.14156 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "ceph-exporter", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:33:18.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:17 vm06 bash[28114]: cephadm 2026-04-15T13:33:17.411241+0000 mgr.vm06.qbbldl (mgr.14126) 24 : cephadm [INF] Saving service ceph-exporter spec with placement *
2026-04-15T13:33:18.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:17 vm06 bash[28114]: audit 2026-04-15T13:33:17.414013+0000 mon.vm06 (mon.0) 73 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:18.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:17 vm06 bash[28114]: audit 2026-04-15T13:33:17.429860+0000 mon.vm06 (mon.0) 74 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:18.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:17 vm06 bash[28114]: audit 2026-04-15T13:33:17.904743+0000 mon.vm06 (mon.0) 75 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:18.372 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled grafana update...
2026-04-15T13:33:18.372 INFO:teuthology.orchestra.run.vm06.stdout:Deploying node-exporter service with default placement...
2026-04-15T13:33:18.784 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled node-exporter update...
2026-04-15T13:33:18.784 INFO:teuthology.orchestra.run.vm06.stdout:Deploying alertmanager service with default placement...
2026-04-15T13:33:19.222 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout Scheduled alertmanager update...
2026-04-15T13:33:19.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:19 vm06 bash[28114]: audit 2026-04-15T13:33:17.900454+0000 mgr.vm06.qbbldl (mgr.14126) 25 : audit [DBG] from='client.14158 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "prometheus", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:33:19.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:19 vm06 bash[28114]: cephadm 2026-04-15T13:33:17.901251+0000 mgr.vm06.qbbldl (mgr.14126) 26 : cephadm [INF] Saving service prometheus spec with placement count:1
2026-04-15T13:33:19.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:19 vm06 bash[28114]: audit 2026-04-15T13:33:18.323339+0000 mgr.vm06.qbbldl (mgr.14126) 27 : audit [DBG] from='client.14160 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "grafana", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:33:19.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:19 vm06 bash[28114]: cephadm 2026-04-15T13:33:18.324251+0000 mgr.vm06.qbbldl (mgr.14126) 28 : cephadm [INF] Saving service grafana spec with placement count:1
2026-04-15T13:33:19.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:19 vm06 bash[28114]: audit 2026-04-15T13:33:18.327282+0000 mon.vm06 (mon.0) 76 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:19.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:19 vm06 bash[28114]: audit 2026-04-15T13:33:18.739883+0000 mon.vm06 (mon.0) 77 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:19.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:19 vm06 bash[28114]: audit 2026-04-15T13:33:19.176074+0000 mon.vm06 (mon.0) 78 : audit [INF] from='mgr.14126 192.168.123.106:0/523610147' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:20.064 INFO:teuthology.orchestra.run.vm06.stdout:Enabling the dashboard module...
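The monitoring stack gets the same treatment: prometheus, grafana and alertmanager are saved with placement count:1, while node-exporter and ceph-exporter run on every host ('*'). Once saved, the specs can be inspected from any admin node; a short sketch:

    # list every saved service spec and its placement
    ceph orch ls
    # export a single saved spec back out as YAML
    ceph orch ls prometheus --export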
2026-04-15T13:33:20.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:20 vm06 bash[28114]: audit 2026-04-15T13:33:18.736318+0000 mgr.vm06.qbbldl (mgr.14126) 29 : audit [DBG] from='client.14162 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "node-exporter", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:33:20.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:20 vm06 bash[28114]: cephadm 2026-04-15T13:33:18.737067+0000 mgr.vm06.qbbldl (mgr.14126) 30 : cephadm [INF] Saving service node-exporter spec with placement *
2026-04-15T13:33:20.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:20 vm06 bash[28114]: audit 2026-04-15T13:33:19.171928+0000 mgr.vm06.qbbldl (mgr.14126) 31 : audit [DBG] from='client.14164 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "alertmanager", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:33:20.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:20 vm06 bash[28114]: cephadm 2026-04-15T13:33:19.172863+0000 mgr.vm06.qbbldl (mgr.14126) 32 : cephadm [INF] Saving service alertmanager spec with placement count:1
2026-04-15T13:33:20.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:20 vm06 bash[28114]: audit 2026-04-15T13:33:19.582148+0000 mon.vm06 (mon.0) 79 : audit [INF] from='client.? 192.168.123.106:0/3999131346' entity='client.admin'
2026-04-15T13:33:20.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:20 vm06 bash[28114]: audit 2026-04-15T13:33:19.988841+0000 mon.vm06 (mon.0) 80 : audit [INF] from='client.? 192.168.123.106:0/3828933864' entity='client.admin'
2026-04-15T13:33:21.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:21 vm06 bash[28114]: audit 2026-04-15T13:33:20.448121+0000 mon.vm06 (mon.0) 81 : audit [INF] from='client.? 192.168.123.106:0/2192137870' entity='client.admin' cmd={"prefix": "mgr module enable", "module": "dashboard"} : dispatch
2026-04-15T13:33:21.870 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout {
2026-04-15T13:33:21.870 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout     "epoch": 9,
2026-04-15T13:33:21.870 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout     "available": true,
2026-04-15T13:33:21.870 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout     "active_name": "vm06.qbbldl",
2026-04-15T13:33:21.870 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout     "num_standby": 0
2026-04-15T13:33:21.870 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }
2026-04-15T13:33:21.870 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for the mgr to restart...
2026-04-15T13:33:21.870 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for mgr epoch 9...
2026-04-15T13:33:22.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:22 vm06 bash[28114]: audit 2026-04-15T13:33:21.334329+0000 mon.vm06 (mon.0) 82 : audit [INF] from='client.? 192.168.123.106:0/2192137870' entity='client.admin' cmd='[{"prefix": "mgr module enable", "module": "dashboard"}]': finished
2026-04-15T13:33:22.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:22 vm06 bash[28114]: cluster 2026-04-15T13:33:21.335533+0000 mon.vm06 (mon.0) 83 : cluster [DBG] mgrmap e9: vm06.qbbldl(active, since 12s)
2026-04-15T13:33:22.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:22 vm06 bash[28114]: audit 2026-04-15T13:33:21.818439+0000 mon.vm06 (mon.0) 84 : audit [DBG] from='client.? 192.168.123.106:0/1843944742' entity='client.admin' cmd={"prefix": "mgr stat"} : dispatch
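Enabling the dashboard module forces the active mgr to respawn, so the bootstrap polls "ceph mgr stat" (the JSON printed above) until the mgrmap epoch moves on. A hedged sketch of that wait loop; jq is an assumption here, not something the test installs:

    # record the current mgrmap epoch, then enable the module
    before=$(ceph mgr stat | jq .epoch)
    ceph mgr module enable dashboard
    # the active mgr respawns; wait for a newer epoch to appear
    until [ "$(ceph mgr stat | jq .epoch)" -gt "$before" ]; do
        sleep 2
    done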
2026-04-15T13:33:29.460 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:29 vm06 bash[28114]: cluster 2026-04-15T13:33:29.098070+0000 mon.vm06 (mon.0) 85 : cluster [INF] Active manager daemon vm06.qbbldl restarted
2026-04-15T13:33:29.460 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:29 vm06 bash[28114]: cluster 2026-04-15T13:33:29.098307+0000 mon.vm06 (mon.0) 86 : cluster [INF] Activating manager daemon vm06.qbbldl
2026-04-15T13:33:29.460 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:29 vm06 bash[28114]: cluster 2026-04-15T13:33:29.102362+0000 mon.vm06 (mon.0) 87 : cluster [DBG] osdmap e3: 0 total, 0 up, 0 in
2026-04-15T13:33:29.460 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:29 vm06 bash[28114]: cluster 2026-04-15T13:33:29.102448+0000 mon.vm06 (mon.0) 88 : cluster [DBG] mgrmap e10: vm06.qbbldl(active, starting, since 0.00424088s)
2026-04-15T13:33:29.460 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:29 vm06 bash[28114]: audit 2026-04-15T13:33:29.105383+0000 mon.vm06 (mon.0) 89 : audit [DBG] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm06"} : dispatch
2026-04-15T13:33:29.460 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:29 vm06 bash[28114]: audit 2026-04-15T13:33:29.106090+0000 mon.vm06 (mon.0) 90 : audit [DBG] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd={"prefix": "mgr metadata", "who": "vm06.qbbldl", "id": "vm06.qbbldl"} : dispatch
2026-04-15T13:33:29.460 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:29 vm06 bash[28114]: audit 2026-04-15T13:33:29.106946+0000 mon.vm06 (mon.0) 91 : audit [DBG] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd={"prefix": "mds metadata"} : dispatch
2026-04-15T13:33:29.460 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:29 vm06 bash[28114]: audit 2026-04-15T13:33:29.107044+0000 mon.vm06 (mon.0) 92 : audit [DBG] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata"} : dispatch
2026-04-15T13:33:29.460 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:29 vm06 bash[28114]: audit 2026-04-15T13:33:29.107246+0000 mon.vm06 (mon.0) 93 : audit [DBG] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata"} : dispatch
2026-04-15T13:33:29.460 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:29 vm06 bash[28114]: cluster 2026-04-15T13:33:29.112337+0000 mon.vm06 (mon.0) 94 : cluster [INF] Manager daemon vm06.qbbldl is now available
2026-04-15T13:33:30.151 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout {
2026-04-15T13:33:30.151 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout     "mgrmap_epoch": 11,
2026-04-15T13:33:30.151 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout     "initialized": true
2026-04-15T13:33:30.151 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout }
2026-04-15T13:33:30.151 INFO:teuthology.orchestra.run.vm06.stdout:mgr epoch 9 is available
2026-04-15T13:33:30.151 INFO:teuthology.orchestra.run.vm06.stdout:Using certmgr to generate dashboard self-signed certificate...
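"Using certmgr..." corresponds to the audit entry below with prefix "orch certmgr generate-certificates"; the mgr answers with the JSON cert/key pair dumped next. Reconstructing the CLI from that audit record (the exact command form is an inference from the audit entry, not confirmed by this log):

    # presumed CLI form of the audited mon-mgr call
    ceph orch certmgr generate-certificates dashboard > dashboard-cert.json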
2026-04-15T13:33:30.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:30 vm06 bash[28114]: audit 2026-04-15T13:33:29.363476+0000 mon.vm06 (mon.0) 95 : audit [DBG] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:33:30.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:30 vm06 bash[28114]: audit 2026-04-15T13:33:29.375963+0000 mon.vm06 (mon.0) 96 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.qbbldl/mirror_snapshot_schedule"} : dispatch
2026-04-15T13:33:30.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:30 vm06 bash[28114]: audit 2026-04-15T13:33:29.376924+0000 mon.vm06 (mon.0) 97 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.qbbldl/trash_purge_schedule"} : dispatch
2026-04-15T13:33:30.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:30 vm06 bash[28114]: cluster 2026-04-15T13:33:30.106099+0000 mon.vm06 (mon.0) 98 : cluster [DBG] mgrmap e11: vm06.qbbldl(active, since 1.00789s)
2026-04-15T13:33:30.948 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout {"cert": "-----BEGIN
CERTIFICATE-----\nMIIE+TCCAuGgAwIBAgIURZf009P9fNL4Ir2jceQw6Du65GswDQYJKoZIhvcNAQEL\nBQAwFzEVMBMGA1UEAwwMY2VwaGFkbS1yb290MB4XDTI2MDQxNTEzMzMzMFoXDTI5\nMDQxNDEzMzMzMFowGjEYMBYGA1UEAwwPMTkyLjE2OC4xMjMuMTA2MIICIjANBgkq\nhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAyfYkyFjit6pomOgvwGcNFjy9aCeO27Jv\nPVYSo67GvccpcY8L7EZdkMt2OgMfSODoxMgE8cxwJTftTTK8TDFANzR9YVBBqipk\nDV1PgEfGXh9B2+rbD0XNCmGaECRFDSRTeLejschzI77heWGvQYIxWs8mN7LtLCli\nzHZx11fAkUY7IuXGqDsVOMF0N+Ak/Awbx1oKiEUzBYWwGwd9h/4J775QNTzLtaRV\nPexjFzOh8ORJanW11H/JKhcykMd8Z5Rm9OQXkMMIZRovN2mQ9/9xDl3jGvwbspry\nP6k38cs8w1x7c5sDHIXDSfzKkncq3LJe89tqIn7faz1PH4Rb/4Va+5dqrSbo2btg\neB+Co0UU2GJgr4umWAxNrwNNtz9iCo1eZHoU1IFefg/tZ/SIJJXVGXmKmWNIV8o0\npYxxVG8ecq/83ydepGi5MbHRbJrqxI8LJ3Uz6g0FhAJUP9VQIOedY/y8/Iv6Ej+G\nRqXWLT1mQ5kOLG+PsieWHhB0RoUGU0drZN2Yw+WiYcsYuTa0L1N6m8/1FaAa7X33\n3xK4s2DQHusgyj424/ASYlDLEC8BgmGCtXv+ebaqE61nAjyglP4qDrxWG89gkvu1\n7ROyiCaijsYwoa5UoJU8MFU/Rdpdl3psABj1HyseUFOQKG7QPjkwDcBfnBTYyh35\nryetTO0g7gECAwEAAaM6MDgwKAYDVR0RBCEwH4IEdm0wNoIRZGFzaGJvYXJkX3Nl\ncnZlcnOHBMCoe2owDAYDVR0TAQH/BAIwADANBgkqhkiG9w0BAQsFAAOCAgEAkQ+H\ng2u29UpojezPcvUVIRrXAUsELqBOodRcF86A4/pxag9KV0YtG1MsSr1vM8oFoFou\nicWeT3wJUBljxhbJ40CaiTJ20KqhoCgzYwegJwvtEELM7nJNXbDoXTt9VoF9+sa/\n5gTdof0XsjXVUNAfAq49oBvWFmTTcZvETCmwunvnILNwEuI7u0E8aR0BuhodrLBA\nwLFaC/vil/PShmvqGDuT958+mk7U/Pr4eQtJQu0YlF+pm8K8pIOz4bUKe0lHqL16\nGQj1Ie3MFcPrgknR8poN8YYRz0c24tvQ46rwIB3fqUYbM/iRDBUgNfF3b2ngEsM1\nbnAfv3Y1YDkFnkl6HVSnRS0/MSTIyxHNPaS/30CqkMlhPEhAXpF9K0pzJyecPHT+\npQPvD2mdtJ8D0kvaDcP/pTlBFRv8MQU+jL9d8URqBlYME+o/VnF97ncN161QFkUp\nZ8VhiyAcAaV/Krrd2DeXefYSL7TVGcuiob/spUics3n2ofBb5O2lQA5lMuifJ+OJ\neL+n7Obuk3lOR7Pd+ifUapVVWGDNRF8hqVoit/ncR2eAa5S19EoJ6IlR6jGi99JT\nwaHiD2b60inr+uiyjaHmgaE1jViXpmzhV0Fps6hDxJrtd50g1Q1dYGEt8bs889rR\ne4NytCiOn+A34c+mrcmxhpN5XOC80TZsk6U/iKI=\n-----END CERTIFICATE-----\n", "key": "-----BEGIN RSA PRIVATE 
KEY-----\nMIIJJwIBAAKCAgEAyfYkyFjit6pomOgvwGcNFjy9aCeO27JvPVYSo67GvccpcY8L\n7EZdkMt2OgMfSODoxMgE8cxwJTftTTK8TDFANzR9YVBBqipkDV1PgEfGXh9B2+rb\nD0XNCmGaECRFDSRTeLejschzI77heWGvQYIxWs8mN7LtLClizHZx11fAkUY7IuXG\nqDsVOMF0N+Ak/Awbx1oKiEUzBYWwGwd9h/4J775QNTzLtaRVPexjFzOh8ORJanW1\n1H/JKhcykMd8Z5Rm9OQXkMMIZRovN2mQ9/9xDl3jGvwbspryP6k38cs8w1x7c5sD\nHIXDSfzKkncq3LJe89tqIn7faz1PH4Rb/4Va+5dqrSbo2btgeB+Co0UU2GJgr4um\nWAxNrwNNtz9iCo1eZHoU1IFefg/tZ/SIJJXVGXmKmWNIV8o0pYxxVG8ecq/83yde\npGi5MbHRbJrqxI8LJ3Uz6g0FhAJUP9VQIOedY/y8/Iv6Ej+GRqXWLT1mQ5kOLG+P\nsieWHhB0RoUGU0drZN2Yw+WiYcsYuTa0L1N6m8/1FaAa7X333xK4s2DQHusgyj42\n4/ASYlDLEC8BgmGCtXv+ebaqE61nAjyglP4qDrxWG89gkvu17ROyiCaijsYwoa5U\noJU8MFU/Rdpdl3psABj1HyseUFOQKG7QPjkwDcBfnBTYyh35ryetTO0g7gECAwEA\nAQKCAgBhxKS90DnAy9/i1dP4AmsVbsYKW8wOkuzC9p4DWyKe0IFBlgwhj1bQN46+\nOrC0aRyF1oZ+P4eynMZl7r+06sq5btGbgGypYegZiChqY1SmT99kBOeTFylRczpP\nwh8jY4h9EflOoUkL7uXcCuv6fHpECa/omtex3z8S1+v4YWS5bxRFqpJm8mMtnIkU\nW4/L7FiuQuqCWW1/qN4913xvjw1kX+8YNpwvDKSvW3YEz1v/76OSw/UIL+xEUgKK\nj+erhO3qc1jtHV09BFYDLzmctK4YVF8SuyT1a4kGRHbuuIJr2KNyrYHuFtsZQCUY\njlPTjw3kYz2YAW9KzN021q5qnjYSZWKEQ0kTd/k4p1mIfqQrIbpkcJ3k5Eaepm+X\n7ANtUEB4niHHuoZXKkAjahEFELNM35A7oQQozW6z99yFQSLLh4Xd6VCy/h6gptaC\nxbIbTWatl4Le6zptXeRNQFFR5Gt9aiGpiQ2+jaus3mk2jjx7Bi+D8qR5Dw9+CPmv\no7NH+oh8pIPvERaEPJryhbwE+Ov4Q7bAw6qhi6hGOWYk1AnkGKYRYKEmCWyw/O7J\niRCsKeNW41Y5Fxi9cM5/KyGI12iJMmVAXcJzPGYW9sOVtdOme+RpC8oK4Vt3k2l/\nddedFuYrxDHAQVJ4nCV+IFQGGueI1aAAcBRNVgBNnQnw0gwGGQKCAQEA6Anm7DBL\ny+jAT1jvkU7vhGk+o4SwCBGbixuakectKeDFBsWJTaeiitfacFxg2xpL25LR0/fg\nqhPkJGCp82JfpjjzVvFZ02Dk32kI07z4is4HAaVgl2RIlxZjjtT5G+CC3urbGgsc\nh7lCE5RJCRbwaZ2dUzm3tG0WkrPUGm6DJzjK2Umye+s4Ef28dh/O0h1H83CmLyW1\n/xNug9zNrM04mCx5PeetoRLJ29kz91itveag4tFu3CqBIIr1v1k0rHrYT6HnN2rh\nez7Vyb6PNsaruDshyRiVjba702cVacOZdo8BOKoEYIwOleQDV/+7BSGQM6KgovrQ\nGTjnsdzyGgpeYwKCAQEA3tEhltfO2dIeespXurUH6vIMrjV2OpeOUiwW74XLuExx\nWttwWYJlD5JR3i+tUNP3T6gQPHJK3JwavjDN4Iyr0NWtfr8kR+ad0uHt97coN8zo\n814SDWHmrLuqHONsLOkp839w9966Xc//wu3y7ULfSqkS6PLPxKMz3hERftKnyCTb\nybj8fVfGj57oaaOKiaboewCQ/Tg+uScTP+yVE0kNIcvPMjh30zJQhA1Klm/3RGHd\nID5a78k05210OoaOtXCG6hPuVf8T59XwshJ4QX2ztXMxPIBFGXKx7AqMqKQJ7+Ph\nmTmGy6WPAWtx2ADkK5IhttHhcg4GVF2Y03/oNhbNSwKCAQAo27WB2W8/VJioYCyv\nFYsvcajJ+k9U3GTbNd0d2+sj+KQ48QegWOhSMngQWPPlINhCrbOs83Tn/eCqVysV\n5DibeGPA53a9tc4XWNNpFVled7DcJVbA0bPGhva469+BfSMORaSXDWXUsFZmRDIo\nG5XxwwM06HDkoGDdLDUpAlStz15IrKV1FJIm+Nr6odc1QFqvDyL0IlvNsoo/Rd4h\nk3Fnuk0PDYCRYFGTGMgBwee2DKo/xkkTE5jxNgbXzslrpaX5P7ULGRLjNMlIYPmu\n14TdR+xHEMEk1wxzyS8IFJ0tNzkr1SQ4K/FVi3TbRfiUirhtnzFSAYArMM37j8BB\nvZmpAoIBADZmtx2+KRnTjG+Mcaj7RCif2mq6UNhzYmM7sonej+O6fj6hryebh5Mq\ntWRbHaTLa+l97iDkAtUkI/9lqQ53Kgf987Mrde88JjJzyQcfJ+mf1Qx+Xkf4LDrm\nQt10xVedwglahzltnvECtL5iHFrDZgMzjpNjnUi0VBiW8D+XrTFXlnYE/ViZErjE\nIpiQPmXnY4GMnNnGXsIUAUYtPCSORG+I9Fidotvkv/XNzPU39xYyEqwprUg4Rvtu\nJN+ZNMSl6brDDQDgG3sSKAdCLSxIRZU6v722z6gr8bbgyIDwNCDm8ZDYk8mq7WR6\nWDxV01+z+RlK2xFezTrR81tIMmjqXksCggEAR974ek7Dg1kcgsripXpBSMK5t9AA\nPBpLNb2SfkkmzfDcXcENQV5KjvnaKyqXgRu576/xJoksG/4xYZRqHe3x4obvccx4\njYXEiNxJUYtXpft0DwsCfXIOxSJp2RmlbZAk9FZ3uZ4rmT/W2TAJGTLQPCaSX6tR\n41s4r8A1v1tSYL4kQO1ZnAc37NrzjB3qGk8Z22Piplqc1WW5WL3J/TfocK9KoGsj\nwzJz7yofik1GzwOLlEgEs9fpt+KwElCY9Mh4pcNE/ksdglAAwf0rGJg9cUzTAuNk\nKG/uL2Gb6+hw1j9epSHx2gB/FWbetM8c8a827qoG/a5SwiaKottzV4F7zA==\n-----END RSA PRIVATE KEY-----\n"} 2026-04-15T13:33:31.402 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout SSL certificate updated 2026-04-15T13:33:31.447 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:31 vm06 bash[28114]: cephadm 2026-04-15T13:33:30.057136+0000 mgr.vm06.qbbldl (mgr.14172) 
1 : cephadm [INF] [15/Apr/2026:13:33:30] ENGINE Bus STARTING
2026-04-15T13:33:31.448 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:31 vm06 bash[28114]: audit 2026-04-15T13:33:30.106715+0000 mgr.vm06.qbbldl (mgr.14172) 2 : audit [DBG] from='client.14176 -' entity='client.admin' cmd=[{"prefix": "get_command_descriptions"}]: dispatch
2026-04-15T13:33:31.448 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:31 vm06 bash[28114]: audit 2026-04-15T13:33:30.110650+0000 mgr.vm06.qbbldl (mgr.14172) 3 : audit [DBG] from='client.14176 -' entity='client.admin' cmd=[{"prefix": "mgr_status"}]: dispatch
2026-04-15T13:33:31.448 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:31 vm06 bash[28114]: cephadm 2026-04-15T13:33:30.166807+0000 mgr.vm06.qbbldl (mgr.14172) 4 : cephadm [INF] [15/Apr/2026:13:33:30] ENGINE Serving on https://192.168.123.106:7150
2026-04-15T13:33:31.448 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:31 vm06 bash[28114]: cephadm 2026-04-15T13:33:30.167300+0000 mgr.vm06.qbbldl (mgr.14172) 5 : cephadm [INF] [15/Apr/2026:13:33:30] ENGINE Client ('192.168.123.106', 49098) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-04-15T13:33:31.448 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:31 vm06 bash[28114]: cephadm 2026-04-15T13:33:30.267992+0000 mgr.vm06.qbbldl (mgr.14172) 6 : cephadm [INF] [15/Apr/2026:13:33:30] ENGINE Serving on http://192.168.123.106:8765
2026-04-15T13:33:31.448 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:31 vm06 bash[28114]: cephadm 2026-04-15T13:33:30.268043+0000 mgr.vm06.qbbldl (mgr.14172) 7 : cephadm [INF] [15/Apr/2026:13:33:30] ENGINE Bus STARTED
2026-04-15T13:33:31.832 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout SSL certificate key updated
2026-04-15T13:33:31.833 INFO:teuthology.orchestra.run.vm06.stdout:Creating initial admin user...
2026-04-15T13:33:32.406 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout {"username": "admin", "password": "$2b$12$2sKODGnq3jLPzuH0OyYYReh2J93TgZxAMw7ylxCdX8vB8iBeuW62G", "roles": ["administrator"], "name": null, "email": null, "lastUpdate": 1776260012, "enabled": true, "pwdExpirationDate": null, "pwdUpdateRequired": true}
2026-04-15T13:33:32.406 INFO:teuthology.orchestra.run.vm06.stdout:Fetching dashboard port number...
2026-04-15T13:33:32.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:32 vm06 bash[28114]: audit 2026-04-15T13:33:30.546521+0000 mgr.vm06.qbbldl (mgr.14172) 8 : audit [DBG] from='client.14184 -' entity='client.admin' cmd=[{"prefix": "orch certmgr generate-certificates", "module_name": "dashboard", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:33:32.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:32 vm06 bash[28114]: cluster 2026-04-15T13:33:31.195436+0000 mon.vm06 (mon.0) 99 : cluster [DBG] mgrmap e12: vm06.qbbldl(active, since 2s)
2026-04-15T13:33:32.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:32 vm06 bash[28114]: audit 2026-04-15T13:33:31.336836+0000 mgr.vm06.qbbldl (mgr.14172) 9 : audit [DBG] from='client.14186 -' entity='client.admin' cmd=[{"prefix": "dashboard set-ssl-certificate", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:33:32.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:32 vm06 bash[28114]: audit 2026-04-15T13:33:31.339820+0000 mon.vm06 (mon.0) 100 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:32.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:32 vm06 bash[28114]: audit 2026-04-15T13:33:31.789542+0000 mon.vm06 (mon.0) 101 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:32.829 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stdout 8443
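With the certificate JSON in hand, the bootstrap installs the cert and key into the dashboard and creates the initial admin account, as the audit entries above show. Done by hand it would look roughly like this (file names and the password file are illustrative; flag spellings follow current ceph docs and are an assumption):

    # install the self-signed certificate and key generated above
    ceph dashboard set-ssl-certificate -i dashboard.crt
    ceph dashboard set-ssl-certificate-key -i dashboard.key
    # create the first dashboard user with the administrator role
    printf '%s' "$DASHBOARD_PW" > /tmp/dashboard-pw
    ceph dashboard ac-user-create admin -i /tmp/dashboard-pw administrator --force-password
    # the port the dashboard binds to (8443 in this run)
    ceph config get mgr mgr/dashboard/ssl_server_port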
2026-04-15T13:33:32.829 INFO:teuthology.orchestra.run.vm06.stdout:firewalld does not appear to be present
2026-04-15T13:33:32.829 INFO:teuthology.orchestra.run.vm06.stdout:Not possible to open ports <[8443]>. firewalld.service is not available
2026-04-15T13:33:32.830 INFO:teuthology.orchestra.run.vm06.stdout:Ceph Dashboard is now available at:
2026-04-15T13:33:32.830 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:33:32.830 INFO:teuthology.orchestra.run.vm06.stdout:    URL: https://vm06.local:8443/
2026-04-15T13:33:32.830 INFO:teuthology.orchestra.run.vm06.stdout:    User: admin
2026-04-15T13:33:32.830 INFO:teuthology.orchestra.run.vm06.stdout:    Password: ibw16mb1vn
2026-04-15T13:33:32.830 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:33:32.831 INFO:teuthology.orchestra.run.vm06.stdout:Saving cluster configuration to /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config directory
2026-04-15T13:33:33.304 INFO:teuthology.orchestra.run.vm06.stdout:/usr/bin/ceph: stderr set mgr/dashboard/cluster/status
2026-04-15T13:33:33.304 INFO:teuthology.orchestra.run.vm06.stdout:You can access the Ceph CLI as following in case of multi-cluster or non-default config:
2026-04-15T13:33:33.304 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:33:33.304 INFO:teuthology.orchestra.run.vm06.stdout:    sudo /home/ubuntu/cephtest/cephadm shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
2026-04-15T13:33:33.304 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:33:33.304 INFO:teuthology.orchestra.run.vm06.stdout:Or, if you are only running a single cluster on this host:
2026-04-15T13:33:33.304 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:33:33.304 INFO:teuthology.orchestra.run.vm06.stdout:    sudo /home/ubuntu/cephtest/cephadm shell
2026-04-15T13:33:33.304 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:33:33.304 INFO:teuthology.orchestra.run.vm06.stdout:Please consider enabling telemetry to help improve Ceph:
2026-04-15T13:33:33.304 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:33:33.304 INFO:teuthology.orchestra.run.vm06.stdout:    ceph telemetry on
2026-04-15T13:33:33.304 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:33:33.304 INFO:teuthology.orchestra.run.vm06.stdout:For more information see:
2026-04-15T13:33:33.304 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:33:33.304 INFO:teuthology.orchestra.run.vm06.stdout:    https://docs.ceph.com/en/latest/mgr/telemetry/
2026-04-15T13:33:33.304 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:33:33.304 INFO:teuthology.orchestra.run.vm06.stdout:Bootstrap complete.
2026-04-15T13:33:33.311 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout static
2026-04-15T13:33:33.314 INFO:teuthology.orchestra.run.vm06.stdout:Non-zero exit code 3 from systemctl is-active logrotate
2026-04-15T13:33:33.314 INFO:teuthology.orchestra.run.vm06.stdout:systemctl: stdout inactive
2026-04-15T13:33:33.314 INFO:teuthology.orchestra.run.vm06.stdout:Enabling the logrotate.timer service to perform daily log rotation.
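The banner prints everything needed to reach the cluster afterwards. A quick reachability check against the URL above (curl on the test node is an assumption; -k skips verification of the self-signed certificate):

    # expect an HTTP status code from the dashboard listener
    curl -ks -o /dev/null -w '%{http_code}\n' https://vm06.local:8443/
    # or enter a cluster shell exactly as the banner suggests
    sudo /home/ubuntu/cephtest/cephadm shell -- ceph -s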
2026-04-15T13:33:33.521 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:33 vm06 bash[28114]: audit 2026-04-15T13:33:31.785645+0000 mgr.vm06.qbbldl (mgr.14172) 10 : audit [DBG] from='client.14188 -' entity='client.admin' cmd=[{"prefix": "dashboard set-ssl-certificate-key", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:33:33.521 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:33 vm06 bash[28114]: audit 2026-04-15T13:33:32.206107+0000 mgr.vm06.qbbldl (mgr.14172) 11 : audit [DBG] from='client.14190 -' entity='client.admin' cmd=[{"prefix": "dashboard ac-user-create", "username": "admin", "rolename": "administrator", "force_password": true, "pwd_update_required": true, "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:33:33.521 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:33 vm06 bash[28114]: audit 2026-04-15T13:33:32.358149+0000 mon.vm06 (mon.0) 102 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:33.521 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:33 vm06 bash[28114]: audit 2026-04-15T13:33:32.770298+0000 mon.vm06 (mon.0) 103 : audit [DBG] from='client.? 192.168.123.106:0/3729298743' entity='client.admin' cmd={"prefix": "config get", "who": "mgr", "key": "mgr/dashboard/ssl_server_port"} : dispatch
2026-04-15T13:33:33.522 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:33 vm06 bash[28114]: audit 2026-04-15T13:33:33.259906+0000 mon.vm06 (mon.0) 104 : audit [INF] from='client.? 192.168.123.106:0/415424445' entity='client.admin'
2026-04-15T13:33:33.522 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:33 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
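The steps below pull /etc/ceph/ceph.conf, the admin keyring and the mon keyring off vm06 with plain dd reads, push the cluster's public SSH key to root on both nodes, and then register vm09 with the orchestrator. Where a live CLI is available, the first two artifacts can also be asked of the cluster itself; a small sketch of that alternative (not what the task does):

    # minimal conf sufficient to reach the monitors
    ceph config generate-minimal-conf
    # the admin credential, suitable for /etc/ceph/ceph.client.admin.keyring
    ceph auth get client.admin
    # afterwards, confirm both hosts are managed
    ceph orch host ls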
2026-04-15T13:33:33.527 INFO:tasks.cephadm:Fetching config...
2026-04-15T13:33:33.527 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-04-15T13:33:33.527 DEBUG:teuthology.orchestra.run.vm06:> dd if=/etc/ceph/ceph.conf of=/dev/stdout
2026-04-15T13:33:33.530 INFO:tasks.cephadm:Fetching client.admin keyring...
2026-04-15T13:33:33.530 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-04-15T13:33:33.530 DEBUG:teuthology.orchestra.run.vm06:> dd if=/etc/ceph/ceph.client.admin.keyring of=/dev/stdout
2026-04-15T13:33:33.574 INFO:tasks.cephadm:Fetching mon keyring...
2026-04-15T13:33:33.574 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-04-15T13:33:33.574 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/keyring of=/dev/stdout
2026-04-15T13:33:33.623 INFO:tasks.cephadm:Fetching pub ssh key...
2026-04-15T13:33:33.623 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-04-15T13:33:33.623 DEBUG:teuthology.orchestra.run.vm06:> dd if=/home/ubuntu/cephtest/ceph.pub of=/dev/stdout
2026-04-15T13:33:33.669 INFO:tasks.cephadm:Installing pub ssh key for root users...
2026-04-15T13:33:33.669 DEBUG:teuthology.orchestra.run.vm06:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC3ypOP4OBBTc+AhOn0J37+8rZEduYkWaoFvuWl6QyqZ3WwuzTvfd2nW0/dTrvxZE7HmOVrG6j5CpjbsO8gaOWdq8L1oanxLoIIEUJEsvCYMmCqi3CIP/CsJRAm8NbP3vqZS+vcginTQnl15yuCEw3mQN6d8SIpCCWcORRnMQjH5R/seODsUynsW+CVk29vPV3BMm4b8P6JiDKaubj1K+HLDfueYDSkGwsGfM+68U0ekFJJoiEBb66L/X8ne8seEYFzqRgv3pd4ijRdbF3o/u4WxTTAeLksBPxnncfFQDgQOggXNwhDYATYp55k88B4Bsbhn4PfqOYBoavzQSthbsR1BZ9Xp5Wy4dLv+qxVOpSThb82HWrt3hG0sXdurfTb6BREV6DQKAyvarKPz5LkOXA8SQRXtUMhQSBDF2thl2+bTQn2kF76Ky3LJKCLfk6xxsFdzabDrqieyxtAxorHKCAokjLjfTu8OgUtFQvr5yfc/PUkRmlHs6OeaLYneNdIpCM= ceph-75e42418-38cf-11f1-9300-4fe77ac4445b' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys
2026-04-15T13:33:33.722 INFO:teuthology.orchestra.run.vm06.stdout:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC3ypOP4OBBTc+AhOn0J37+8rZEduYkWaoFvuWl6QyqZ3WwuzTvfd2nW0/dTrvxZE7HmOVrG6j5CpjbsO8gaOWdq8L1oanxLoIIEUJEsvCYMmCqi3CIP/CsJRAm8NbP3vqZS+vcginTQnl15yuCEw3mQN6d8SIpCCWcORRnMQjH5R/seODsUynsW+CVk29vPV3BMm4b8P6JiDKaubj1K+HLDfueYDSkGwsGfM+68U0ekFJJoiEBb66L/X8ne8seEYFzqRgv3pd4ijRdbF3o/u4WxTTAeLksBPxnncfFQDgQOggXNwhDYATYp55k88B4Bsbhn4PfqOYBoavzQSthbsR1BZ9Xp5Wy4dLv+qxVOpSThb82HWrt3hG0sXdurfTb6BREV6DQKAyvarKPz5LkOXA8SQRXtUMhQSBDF2thl2+bTQn2kF76Ky3LJKCLfk6xxsFdzabDrqieyxtAxorHKCAokjLjfTu8OgUtFQvr5yfc/PUkRmlHs6OeaLYneNdIpCM= ceph-75e42418-38cf-11f1-9300-4fe77ac4445b
2026-04-15T13:33:33.727 DEBUG:teuthology.orchestra.run.vm09:> sudo install -d -m 0700 /root/.ssh && echo 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC3ypOP4OBBTc+AhOn0J37+8rZEduYkWaoFvuWl6QyqZ3WwuzTvfd2nW0/dTrvxZE7HmOVrG6j5CpjbsO8gaOWdq8L1oanxLoIIEUJEsvCYMmCqi3CIP/CsJRAm8NbP3vqZS+vcginTQnl15yuCEw3mQN6d8SIpCCWcORRnMQjH5R/seODsUynsW+CVk29vPV3BMm4b8P6JiDKaubj1K+HLDfueYDSkGwsGfM+68U0ekFJJoiEBb66L/X8ne8seEYFzqRgv3pd4ijRdbF3o/u4WxTTAeLksBPxnncfFQDgQOggXNwhDYATYp55k88B4Bsbhn4PfqOYBoavzQSthbsR1BZ9Xp5Wy4dLv+qxVOpSThb82HWrt3hG0sXdurfTb6BREV6DQKAyvarKPz5LkOXA8SQRXtUMhQSBDF2thl2+bTQn2kF76Ky3LJKCLfk6xxsFdzabDrqieyxtAxorHKCAokjLjfTu8OgUtFQvr5yfc/PUkRmlHs6OeaLYneNdIpCM= ceph-75e42418-38cf-11f1-9300-4fe77ac4445b' | sudo tee -a /root/.ssh/authorized_keys && sudo chmod 0600 /root/.ssh/authorized_keys
2026-04-15T13:33:33.739 INFO:teuthology.orchestra.run.vm09.stdout:ssh-rsa
AAAAB3NzaC1yc2EAAAADAQABAAABgQC3ypOP4OBBTc+AhOn0J37+8rZEduYkWaoFvuWl6QyqZ3WwuzTvfd2nW0/dTrvxZE7HmOVrG6j5CpjbsO8gaOWdq8L1oanxLoIIEUJEsvCYMmCqi3CIP/CsJRAm8NbP3vqZS+vcginTQnl15yuCEw3mQN6d8SIpCCWcORRnMQjH5R/seODsUynsW+CVk29vPV3BMm4b8P6JiDKaubj1K+HLDfueYDSkGwsGfM+68U0ekFJJoiEBb66L/X8ne8seEYFzqRgv3pd4ijRdbF3o/u4WxTTAeLksBPxnncfFQDgQOggXNwhDYATYp55k88B4Bsbhn4PfqOYBoavzQSthbsR1BZ9Xp5Wy4dLv+qxVOpSThb82HWrt3hG0sXdurfTb6BREV6DQKAyvarKPz5LkOXA8SQRXtUMhQSBDF2thl2+bTQn2kF76Ky3LJKCLfk6xxsFdzabDrqieyxtAxorHKCAokjLjfTu8OgUtFQvr5yfc/PUkRmlHs6OeaLYneNdIpCM= ceph-75e42418-38cf-11f1-9300-4fe77ac4445b 2026-04-15T13:33:33.744 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph config set mgr mgr/cephadm/allow_ptrace true 2026-04-15T13:33:34.075 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:33:34.603 INFO:tasks.cephadm:Distributing conf and client.admin keyring to all hosts + 0755 2026-04-15T13:33:34.603 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch client-keyring set client.admin '*' --mode 0755 2026-04-15T13:33:34.892 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:33:35.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:34 vm06 bash[28114]: audit 2026-04-15T13:33:33.986919+0000 mon.vm06 (mon.0) 105 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' 2026-04-15T13:33:35.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:34 vm06 bash[28114]: audit 2026-04-15T13:33:33.986919+0000 mon.vm06 (mon.0) 105 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' 2026-04-15T13:33:35.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:34 vm06 bash[28114]: audit 2026-04-15T13:33:34.512263+0000 mon.vm06 (mon.0) 106 : audit [INF] from='client.? 192.168.123.106:0/1499453947' entity='client.admin' 2026-04-15T13:33:35.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:34 vm06 bash[28114]: audit 2026-04-15T13:33:34.512263+0000 mon.vm06 (mon.0) 106 : audit [INF] from='client.? 
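Every orchestrator call above and below runs through the same wrapper: teuthology stages the cephadm binary under /home/ubuntu/cephtest/ and executes each ceph command in a throwaway container pinned to the test image. A minimal sketch of the pattern, with the image, fsid, and paths taken from this run (illustrative only, not teuthology's actual code):

    # run a single ceph command in a one-shot container on the pinned image
    sudo /home/ubuntu/cephtest/cephadm \
        --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 \
        shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b \
        -- ceph orch ps   # any ceph subcommand can follow the --

The "Inferring config" lines on stderr appear to be cephadm locating a usable ceph.conf inside the daemon data directory for the shell container.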
2026-04-15T13:33:35.178 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:34 vm06 bash[28114]: audit 2026-04-15T13:33:34.756601+0000 mon.vm06 (mon.0) 107 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:35.394 INFO:tasks.cephadm:Writing (initial) conf and keyring to vm09
2026-04-15T13:33:35.394 DEBUG:teuthology.orchestra.run.vm09:> set -ex
2026-04-15T13:33:35.394 DEBUG:teuthology.orchestra.run.vm09:> dd of=/etc/ceph/ceph.conf
2026-04-15T13:33:35.398 DEBUG:teuthology.orchestra.run.vm09:> set -ex
2026-04-15T13:33:35.398 DEBUG:teuthology.orchestra.run.vm09:> dd of=/etc/ceph/ceph.client.admin.keyring
2026-04-15T13:33:35.444 INFO:tasks.cephadm:Adding host vm09 to orchestrator...
2026-04-15T13:33:35.444 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch host add vm09
2026-04-15T13:33:35.735 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:33:36.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:36 vm06 bash[28114]: audit 2026-04-15T13:33:35.307430+0000 mgr.vm06.qbbldl (mgr.14172) 12 : audit [DBG] from='client.14198 -' entity='client.admin' cmd=[{"prefix": "orch client-keyring set", "entity": "client.admin", "placement": "*", "mode": "0755", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:33:36.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:36 vm06 bash[28114]: audit 2026-04-15T13:33:35.310400+0000 mon.vm06 (mon.0) 108 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:36.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:36 vm06 bash[28114]: cluster 2026-04-15T13:33:35.994946+0000 mon.vm06 (mon.0) 109 : cluster [DBG] mgrmap e13: vm06.qbbldl(active, since 6s)
2026-04-15T13:33:37.566 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:37 vm06 bash[28114]: audit 2026-04-15T13:33:36.139188+0000 mgr.vm06.qbbldl (mgr.14172) 13 : audit [DBG] from='client.14200 -' entity='client.admin' cmd=[{"prefix": "orch host add", "hostname": "vm09", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:33:37.566 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:37 vm06 bash[28114]: audit 2026-04-15T13:33:36.860674+0000 mon.vm06 (mon.0) 110 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:37.566 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:37 vm06 bash[28114]: audit 2026-04-15T13:33:36.863362+0000 mon.vm06 (mon.0) 111 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:37.567 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:37 vm06 bash[28114]: audit 2026-04-15T13:33:36.864146+0000 mon.vm06 (mon.0) 112 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"} : dispatch
2026-04-15T13:33:37.567 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:37 vm06 bash[28114]: audit 2026-04-15T13:33:36.865166+0000 mon.vm06 (mon.0) 113 : audit [DBG] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:33:37.567 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:37 vm06 bash[28114]: audit 2026-04-15T13:33:36.865936+0000 mon.vm06 (mon.0) 114 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:33:37.567 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:37 vm06 bash[28114]: audit 2026-04-15T13:33:37.004788+0000 mon.vm06 (mon.0) 115 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:37.567 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:37 vm06 bash[28114]: audit 2026-04-15T13:33:37.008653+0000 mon.vm06 (mon.0) 116 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:37.567 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:37 vm06 bash[28114]: audit 2026-04-15T13:33:37.011472+0000 mon.vm06 (mon.0) 117 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:37.567 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:37 vm06 bash[28114]: audit 2026-04-15T13:33:37.012445+0000 mon.vm06 (mon.0) 118 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm06", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch
2026-04-15T13:33:37.567 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:37 vm06 bash[28114]: audit 2026-04-15T13:33:37.013642+0000 mon.vm06 (mon.0) 119 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm06", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished
2026-04-15T13:33:37.567 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:37 vm06 bash[28114]: audit 2026-04-15T13:33:37.014877+0000 mon.vm06 (mon.0) 120 : audit [DBG] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:33:37.963 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:37 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:33:38.628 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:38 vm06 bash[28114]: cephadm 2026-04-15T13:33:36.866856+0000 mgr.vm06.qbbldl (mgr.14172) 14 : cephadm [INF] Updating vm06:/etc/ceph/ceph.conf
2026-04-15T13:33:38.628 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:38 vm06 bash[28114]: cephadm 2026-04-15T13:33:36.899714+0000 mgr.vm06.qbbldl (mgr.14172) 15 : cephadm [INF] Updating vm06:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.conf
2026-04-15T13:33:38.628 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:38 vm06 bash[28114]: cephadm 2026-04-15T13:33:36.933170+0000 mgr.vm06.qbbldl (mgr.14172) 16 : cephadm [INF] Updating vm06:/etc/ceph/ceph.client.admin.keyring
2026-04-15T13:33:38.628 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:38 vm06 bash[28114]: cephadm 2026-04-15T13:33:36.964718+0000 mgr.vm06.qbbldl (mgr.14172) 17 : cephadm [INF] Updating vm06:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.client.admin.keyring
2026-04-15T13:33:38.628 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:38 vm06 bash[28114]: cephadm 2026-04-15T13:33:37.015743+0000 mgr.vm06.qbbldl (mgr.14172) 18 : cephadm [INF] Deploying daemon ceph-exporter.vm06 on vm06
2026-04-15T13:33:38.628 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:38 vm06 bash[28114]: cephadm 2026-04-15T13:33:37.059407+0000 mgr.vm06.qbbldl (mgr.14172) 19 : cephadm [INF] Deploying cephadm binary to vm09
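The recurring systemd complaint refers to line 23 of the unit file cephadm generates for each containerized daemon. KillMode=none is deliberate in cephadm units (the container runtime, not systemd, owns the daemon's processes), so in this run the warning is expected noise. On a system where it mattered, the usual systemd remedy would be a drop-in override, sketched here as a hypothetical; this job does not perform it:

    # hypothetical drop-in override; NOT applied by this test run
    sudo systemctl edit ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service
    # contents of the override file:
    #   [Service]
    #   KillMode=mixed
    sudo systemctl daemon-reload   # reload units so the override takes effect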
2026-04-15T13:33:39.078 INFO:teuthology.orchestra.run.vm06.stdout:Added host 'vm09' with addr '192.168.123.109'
2026-04-15T13:33:39.150 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:38 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:33:39.150 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:39 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:33:39.194 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch host ls --format=json
2026-04-15T13:33:39.443 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:39 vm06 bash[28114]: audit 2026-04-15T13:33:38.343981+0000 mon.vm06 (mon.0) 121 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:39.443 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:39 vm06 bash[28114]: audit 2026-04-15T13:33:38.346172+0000 mon.vm06 (mon.0) 122 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:39.443 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:39 vm06 bash[28114]: audit 2026-04-15T13:33:38.348511+0000 mon.vm06 (mon.0) 123 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:39.443 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:39 vm06 bash[28114]: audit 2026-04-15T13:33:38.350280+0000 mon.vm06 (mon.0) 124 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:39.443 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:39 vm06 bash[28114]: audit 2026-04-15T13:33:38.352798+0000 mon.vm06 (mon.0) 125 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm06", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch
2026-04-15T13:33:39.443 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:39 vm06 bash[28114]: audit 2026-04-15T13:33:38.353838+0000 mon.vm06 (mon.0) 126 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm06", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished
2026-04-15T13:33:39.443 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:39 vm06 bash[28114]: audit 2026-04-15T13:33:38.354829+0000 mon.vm06 (mon.0) 127 : audit [DBG] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:33:39.443 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:39 vm06 bash[28114]: cephadm 2026-04-15T13:33:38.355367+0000 mgr.vm06.qbbldl (mgr.14172) 20 : cephadm [INF] Deploying daemon crash.vm06 on vm06
2026-04-15T13:33:39.443 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:39 vm06 bash[28114]: audit 2026-04-15T13:33:39.078517+0000 mon.vm06 (mon.0) 128 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:39.443 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:39 vm06 bash[28114]: audit 2026-04-15T13:33:39.192320+0000 mon.vm06 (mon.0) 129 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:39.443 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:39 vm06 bash[28114]: audit 2026-04-15T13:33:39.200181+0000 mon.vm06 (mon.0) 130 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:39.443 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:39 vm06 bash[28114]: audit 2026-04-15T13:33:39.203225+0000 mon.vm06 (mon.0) 131 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:39.443 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:39 vm06 bash[28114]: audit 2026-04-15T13:33:39.205596+0000 mon.vm06 (mon.0) 132 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:39.494 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:33:39.733 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:39 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:33:40.333 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:33:40.333 INFO:teuthology.orchestra.run.vm06.stdout:[{"addr": "192.168.123.106", "hostname": "vm06", "labels": [], "status": ""}, {"addr": "192.168.123.109", "hostname": "vm09", "labels": [], "status": ""}]
2026-04-15T13:33:40.409 INFO:tasks.cephadm:Setting crush tunables to default
2026-04-15T13:33:40.409 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd crush tunables default
2026-04-15T13:33:40.674 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:33:40.692 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:40 vm06 bash[28114]: cephadm 2026-04-15T13:33:39.078888+0000 mgr.vm06.qbbldl (mgr.14172) 21 : cephadm [INF] Added host vm09
2026-04-15T13:33:40.692 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:40 vm06 bash[28114]: cephadm 2026-04-15T13:33:39.213189+0000 mgr.vm06.qbbldl (mgr.14172) 22 : cephadm [INF] Deploying daemon node-exporter.vm06 on vm06
2026-04-15T13:33:40.692 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:40 vm06 bash[28114]: audit 2026-04-15T13:33:39.378403+0000 mon.vm06 (mon.0) 133 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:40.692 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:40 vm06 bash[28114]: audit 2026-04-15T13:33:39.975099+0000 mon.vm06 (mon.0) 134 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:40.692 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:40 vm06 bash[28114]: audit 2026-04-15T13:33:39.979277+0000 mon.vm06 (mon.0) 135 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:40.692 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:40 vm06 bash[28114]: audit 2026-04-15T13:33:39.982161+0000 mon.vm06 (mon.0) 136 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:40.692 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:40 vm06 bash[28114]: audit 2026-04-15T13:33:39.984683+0000 mon.vm06 (mon.0) 137 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:41.385 INFO:teuthology.orchestra.run.vm06.stderr:adjusted tunables profile to default
2026-04-15T13:33:41.413 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:41 vm06 bash[28114]: cephadm 2026-04-15T13:33:39.992030+0000 mgr.vm06.qbbldl (mgr.14172) 23 : cephadm [INF] Deploying daemon alertmanager.vm06 on vm06
2026-04-15T13:33:41.413 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:41 vm06 bash[28114]: audit 2026-04-15T13:33:40.334035+0000 mgr.vm06.qbbldl (mgr.14172) 24 : audit [DBG] from='client.14203 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:33:41.413 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:41 vm06 bash[28114]: audit 2026-04-15T13:33:41.043582+0000 mon.vm06 (mon.0) 138 : audit [INF] from='client.? 192.168.123.106:0/4112034267' entity='client.admin' cmd={"prefix": "osd crush tunables", "profile": "default"} : dispatch
2026-04-15T13:33:41.459 INFO:tasks.cephadm:Adding mon.vm06 on vm06
2026-04-15T13:33:41.459 INFO:tasks.cephadm:Adding mon.vm09 on vm09
2026-04-15T13:33:41.459 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch apply mon '2;vm06:192.168.123.106=vm06;vm09:192.168.123.109=vm09'
2026-04-15T13:33:41.753 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-15T13:33:42.288 INFO:teuthology.orchestra.run.vm09.stdout:Scheduled mon update...
2026-04-15T13:33:42.354 DEBUG:teuthology.orchestra.run.vm09:mon.vm09> sudo journalctl -f -n 0 -u ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@mon.vm09.service
2026-04-15T13:33:42.355 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
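The quoted placement argument packs a count and explicit host entries into one string: '2' is the target number of mons, and each 'host:addr=name' triple pins a mon to a host, IP, and daemon name (the mgr log below echoes it back as 'vm06:192.168.123.106=vm06;vm09:192.168.123.109=vm09;count:2'). The same placement could be fed to the orchestrator as a YAML service spec on stdin; a sketch assembled from the values in the log (the run itself used the inline form):

    # sketch: equivalent spec applied via stdin, not what this job ran
    sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 \
        shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch apply -i - <<'EOF'
    service_type: mon
    placement:
      count: 2
      hosts:
      - vm06:192.168.123.106=vm06
      - vm09:192.168.123.109=vm09
    EOF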
2026-04-15T13:33:42.355 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json
2026-04-15T13:33:42.675 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-15T13:33:42.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:42 vm06 bash[28114]: audit 2026-04-15T13:33:41.385788+0000 mon.vm06 (mon.0) 139 : audit [INF] from='client.? 192.168.123.106:0/4112034267' entity='client.admin' cmd='[{"prefix": "osd crush tunables", "profile": "default"}]': finished
2026-04-15T13:33:42.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:42 vm06 bash[28114]: cluster 2026-04-15T13:33:41.387748+0000 mon.vm06 (mon.0) 140 : cluster [DBG] osdmap e4: 0 total, 0 up, 0 in
2026-04-15T13:33:42.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:42 vm06 bash[28114]: audit 2026-04-15T13:33:42.288457+0000 mon.vm06 (mon.0) 141 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:43.129 INFO:teuthology.orchestra.run.vm09.stdout:
2026-04-15T13:33:43.129 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:32:39.407888Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-15T13:33:43.129 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1
2026-04-15T13:33:43.455 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:43 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
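The dump above still shows monmap epoch 1: "mons" contains only vm06 and "quorum" is [0], so the task keeps polling until the second mon appears. The check reduces to counting entries in the JSON; a hand-rolled equivalent, assuming jq were installed on the node (it is not part of this job's package list):

    # sketch of the wait condition, not teuthology's actual loop
    CEPHADM="sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b --"
    until [ "$($CEPHADM ceph mon dump -f json 2>/dev/null | jq '.mons | length')" -ge 2 ]; do
        echo "waiting for 2 mons in monmap"
        sleep 2
    done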
2026-04-15T13:33:43.456 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:43 vm06 bash[28114]: audit 2026-04-15T13:33:42.284107+0000 mgr.vm06.qbbldl (mgr.14172) 25 : audit [DBG] from='client.14207 -' entity='client.admin' cmd=[{"prefix": "orch apply", "service_type": "mon", "placement": "2;vm06:192.168.123.106=vm06;vm09:192.168.123.109=vm09", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:33:43.456 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:43 vm06 bash[28114]: cephadm 2026-04-15T13:33:42.285406+0000 mgr.vm06.qbbldl (mgr.14172) 26 : cephadm [INF] Saving service mon spec with placement vm06:192.168.123.106=vm06;vm09:192.168.123.109=vm09;count:2
2026-04-15T13:33:43.456 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:43 vm06 bash[28114]: audit 2026-04-15T13:33:43.129756+0000 mon.vm06 (mon.0) 142 : audit [DBG] from='client.? 192.168.123.109:0/3431272067' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T13:33:44.195 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T13:33:44.195 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json
2026-04-15T13:33:44.463 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-15T13:33:44.904 INFO:teuthology.orchestra.run.vm09.stdout:
2026-04-15T13:33:44.904 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:32:39.407888Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-15T13:33:44.904 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1
2026-04-15T13:33:45.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:44 vm06 bash[28114]: audit 2026-04-15T13:33:43.694529+0000 mon.vm06 (mon.0) 143 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:45.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:44 vm06 bash[28114]: audit 2026-04-15T13:33:43.697409+0000 mon.vm06 (mon.0) 144 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:45.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:44 vm06 bash[28114]: audit 2026-04-15T13:33:43.700102+0000 mon.vm06 (mon.0) 145 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:45.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:44 vm06 bash[28114]: audit 2026-04-15T13:33:43.702432+0000 mon.vm06 (mon.0) 146 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:45.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:44 vm06 bash[28114]: cephadm 2026-04-15T13:33:43.703894+0000 mgr.vm06.qbbldl (mgr.14172) 27 : cephadm [INF] Generating cephadm-signed certificates for grafana_cert/grafana_key
2026-04-15T13:33:45.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:44 vm06 bash[28114]: audit 2026-04-15T13:33:44.196446+0000 mon.vm06 (mon.0) 147 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:45.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:44 vm06 bash[28114]: audit 2026-04-15T13:33:44.199507+0000 mon.vm06 (mon.0) 148 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:45.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:44 vm06 bash[28114]: audit 2026-04-15T13:33:44.204117+0000 mon.vm06 (mon.0) 149 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:45.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:44 vm06 bash[28114]: audit 2026-04-15T13:33:44.206345+0000 mon.vm06 (mon.0) 150 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:45.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:44 vm06 bash[28114]: audit 2026-04-15T13:33:44.210568+0000 mon.vm06 (mon.0) 151 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"} : dispatch
2026-04-15T13:33:45.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:44 vm06 bash[28114]: audit 2026-04-15T13:33:44.210994+0000 mgr.vm06.qbbldl (mgr.14172) 28 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-04-15T13:33:45.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:44 vm06 bash[28114]: audit 2026-04-15T13:33:44.212935+0000 mon.vm06 (mon.0) 152 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:45.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:44 vm06 bash[28114]: cephadm 2026-04-15T13:33:44.222299+0000 mgr.vm06.qbbldl (mgr.14172) 29 : cephadm [INF] Deploying daemon grafana.vm06 on vm06
2026-04-15T13:33:45.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:44 vm06 bash[28114]: audit 2026-04-15T13:33:44.385151+0000 mon.vm06 (mon.0) 153 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:45.968 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T13:33:45.969 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json
2026-04-15T13:33:46.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:45 vm06 bash[28114]: audit 2026-04-15T13:33:44.905136+0000 mon.vm06 (mon.0) 154 : audit [DBG] from='client.? 192.168.123.109:0/1073258108' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T13:33:46.241 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-15T13:33:46.688 INFO:teuthology.orchestra.run.vm09.stdout:
2026-04-15T13:33:46.688 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:32:39.407888Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-15T13:33:46.688 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1
2026-04-15T13:33:47.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:46 vm06 bash[28114]: audit 2026-04-15T13:33:46.689110+0000 mon.vm06 (mon.0) 155 : audit [DBG] from='client.? 192.168.123.109:0/1566887917' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T13:33:47.840 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T13:33:47.840 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json
2026-04-15T13:33:48.106 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-15T13:33:48.551 INFO:teuthology.orchestra.run.vm09.stdout:
2026-04-15T13:33:48.551 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:32:39.407888Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-15T13:33:48.552 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1
2026-04-15T13:33:49.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:48 vm06 bash[28114]: audit 2026-04-15T13:33:48.552246+0000 mon.vm06 (mon.0) 156 : audit [DBG] from='client.? 192.168.123.109:0/1226234107' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T13:33:49.623 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T13:33:49.623 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json
2026-04-15T13:33:49.885 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-15T13:33:50.352 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1
2026-04-15T13:33:50.352 INFO:teuthology.orchestra.run.vm09.stdout:
2026-04-15T13:33:50.352 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:32:39.407888Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-15T13:33:50.511 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:50 vm06 bash[28114]: cluster 2026-04-15T13:33:49.107898+0000 mgr.vm06.qbbldl (mgr.14172) 30 : cluster [DBG] pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:33:51.413 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T13:33:51.413 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json
2026-04-15T13:33:51.492 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:51 vm06 bash[28114]: audit 2026-04-15T13:33:50.352673+0000 mon.vm06 (mon.0) 157 : audit [DBG] from='client.? 192.168.123.109:0/3925345006' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T13:33:51.673 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-15T13:33:52.324 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1
2026-04-15T13:33:52.324 INFO:teuthology.orchestra.run.vm09.stdout:
2026-04-15T13:33:52.324 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:32:39.407888Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-15T13:33:52.672 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:52 vm06 bash[28114]: cluster 2026-04-15T13:33:51.108144+0000 mgr.vm06.qbbldl (mgr.14172) 31 : cluster [DBG] pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:33:52.963 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:52 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:33:53.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:53 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:33:53.470 INFO:tasks.cephadm:Waiting for 2 mons in monmap...
2026-04-15T13:33:53.471 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json
2026-04-15T13:33:53.531 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:53 vm06 bash[28114]: audit 2026-04-15T13:33:52.324294+0000 mon.vm06 (mon.0) 158 : audit [DBG] from='client.? 192.168.123.109:0/662807457' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T13:33:53.531 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:53 vm06 bash[28114]: cluster 2026-04-15T13:33:53.108314+0000 mgr.vm06.qbbldl (mgr.14172) 32 : cluster [DBG] pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:33:53.531 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:53 vm06 bash[28114]: audit 2026-04-15T13:33:53.209243+0000 mon.vm06 (mon.0) 159 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:53.531 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:53 vm06 bash[28114]: audit 2026-04-15T13:33:53.212622+0000 mon.vm06 (mon.0) 160 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:53.531 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:53 vm06 bash[28114]: audit 2026-04-15T13:33:53.215513+0000 mon.vm06 (mon.0) 161 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:53.531 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:53 vm06 bash[28114]: audit 2026-04-15T13:33:53.217303+0000 mon.vm06 (mon.0) 162 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:53.531 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:53 vm06 bash[28114]: audit 2026-04-15T13:33:53.219248+0000 mon.vm06 (mon.0) 163 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:53.531 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:53 vm06 bash[28114]: audit 2026-04-15T13:33:53.221348+0000 mon.vm06 (mon.0) 164 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:53.531 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:53 vm06 bash[28114]: audit 2026-04-15T13:33:53.223157+0000 mon.vm06 (mon.0) 165 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:53.531 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:53 vm06 bash[28114]: audit 2026-04-15T13:33:53.224857+0000 mon.vm06 (mon.0) 166 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:33:53.531 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:53 vm06 bash[28114]: cephadm 2026-04-15T13:33:53.393978+0000 mgr.vm06.qbbldl (mgr.14172) 33 : cephadm [INF] Deploying daemon prometheus.vm06 on vm06
2026-04-15T13:33:53.735 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf
2026-04-15T13:33:54.164 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1
2026-04-15T13:33:54.164 INFO:teuthology.orchestra.run.vm09.stdout:
2026-04-15T13:33:54.164 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:32:39.407888Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]}
2026-04-15T13:33:54.535 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:54 vm06 bash[28114]: audit 2026-04-15T13:33:54.165048+0000 mon.vm06 (mon.0) 167 : audit [DBG] from='client.? 192.168.123.109:0/4162719016' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
192.168.123.109:0/4162719016' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:33:54.535 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:54 vm06 bash[28114]: audit 2026-04-15T13:33:54.396975+0000 mon.vm06 (mon.0) 168 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' 2026-04-15T13:33:54.535 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:54 vm06 bash[28114]: audit 2026-04-15T13:33:54.396975+0000 mon.vm06 (mon.0) 168 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' 2026-04-15T13:33:55.231 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-04-15T13:33:55.231 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json 2026-04-15T13:33:55.513 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:33:55.514 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:33:55.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:55 vm06 bash[28114]: cluster 2026-04-15T13:33:55.108501+0000 mgr.vm06.qbbldl (mgr.14172) 34 : cluster [DBG] pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:33:55.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:55 vm06 bash[28114]: cluster 2026-04-15T13:33:55.108501+0000 mgr.vm06.qbbldl (mgr.14172) 34 : cluster [DBG] pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:33:55.955 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:33:55.955 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:32:39.407888Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-15T13:33:55.955 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-04-15T13:33:56.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:56 vm06 bash[28114]: audit 2026-04-15T13:33:55.955594+0000 mon.vm06 (mon.0) 169 : audit [DBG] from='client.? 192.168.123.109:0/696377492' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:33:56.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:56 vm06 bash[28114]: audit 2026-04-15T13:33:55.955594+0000 mon.vm06 (mon.0) 169 : audit [DBG] from='client.? 192.168.123.109:0/696377492' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:33:57.040 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
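The tasks.cephadm loop above keeps re-running ceph mon dump -f json through the cephadm shell and counting the "mons" array until a second monitor joins the monmap. A minimal sketch of that poll in Python, reusing the image and fsid from this run; the helper itself is hypothetical, not teuthology's implementation:

    import json
    import subprocess
    import time

    CEPHADM = "/home/ubuntu/cephtest/cephadm"
    IMAGE = "harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5"
    FSID = "75e42418-38cf-11f1-9300-4fe77ac4445b"

    def mon_count() -> int:
        # Shell into the cluster container and dump the monmap as JSON,
        # exactly as the DEBUG lines above do.
        out = subprocess.run(
            ["sudo", CEPHADM, "--image", IMAGE, "shell",
             "-c", "/etc/ceph/ceph.conf",
             "-k", "/etc/ceph/ceph.client.admin.keyring",
             "--fsid", FSID, "--", "ceph", "mon", "dump", "-f", "json"],
            check=True, capture_output=True, text=True,
        ).stdout
        return len(json.loads(out)["mons"])

    while mon_count() < 2:
        time.sleep(2)  # the log shows roughly a two-second cadence between dumps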
2026-04-15T13:33:57.040 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json 2026-04-15T13:33:57.309 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:33:57.310 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:33:57.727 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:33:57.727 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:32:39.407888Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-15T13:33:57.727 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-04-15T13:33:57.739 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:57 vm06 bash[28114]: cluster 2026-04-15T13:33:57.108730+0000 mgr.vm06.qbbldl (mgr.14172) 35 : cluster [DBG] pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:33:57.739 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:57 vm06 bash[28114]: cluster 2026-04-15T13:33:57.108730+0000 mgr.vm06.qbbldl (mgr.14172) 35 : cluster [DBG] pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:33:58.691 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:58 vm06 bash[28114]: audit 2026-04-15T13:33:57.727718+0000 mon.vm06 (mon.0) 170 : audit [DBG] from='client.? 192.168.123.109:0/486470959' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:33:58.691 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:58 vm06 bash[28114]: audit 2026-04-15T13:33:57.727718+0000 mon.vm06 (mon.0) 170 : audit [DBG] from='client.? 192.168.123.109:0/486470959' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:33:58.805 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-04-15T13:33:58.805 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json 2026-04-15T13:33:58.944 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:58 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 
2026-04-15T13:33:59.086 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:33:59.086 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:33:59.234 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:33:58 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-04-15T13:33:59.537 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-04-15T13:33:59.537 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:33:59.537 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:32:39.407888Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-15T13:34:00.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:00 vm06 bash[28114]: audit 2026-04-15T13:33:59.075596+0000 mon.vm06 (mon.0) 171 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:00.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:00 vm06 bash[28114]: audit 2026-04-15T13:33:59.075596+0000 mon.vm06 (mon.0) 171 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:00.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:00 vm06 bash[28114]: audit 2026-04-15T13:33:59.078699+0000 mon.vm06 (mon.0) 172 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:00.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:00 vm06 bash[28114]: audit 2026-04-15T13:33:59.078699+0000 mon.vm06 (mon.0) 172 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:00.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:00 vm06 bash[28114]: audit 2026-04-15T13:33:59.081129+0000 mon.vm06 (mon.0) 173 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:00.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:00 vm06 bash[28114]: audit 2026-04-15T13:33:59.081129+0000 mon.vm06 (mon.0) 173 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:00.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:00 vm06 bash[28114]: audit 2026-04-15T13:33:59.082324+0000 mon.vm06 (mon.0) 174 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd={"prefix": "mgr module enable", "module": "prometheus"} : dispatch 2026-04-15T13:34:00.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:00 vm06 bash[28114]: audit 
2026-04-15T13:33:59.082324+0000 mon.vm06 (mon.0) 174 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd={"prefix": "mgr module enable", "module": "prometheus"} : dispatch 2026-04-15T13:34:00.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:00 vm06 bash[28114]: cluster 2026-04-15T13:33:59.108925+0000 mgr.vm06.qbbldl (mgr.14172) 36 : cluster [DBG] pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:00.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:00 vm06 bash[28114]: cluster 2026-04-15T13:33:59.108925+0000 mgr.vm06.qbbldl (mgr.14172) 36 : cluster [DBG] pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:00.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:00 vm06 bash[28114]: audit 2026-04-15T13:33:59.400620+0000 mon.vm06 (mon.0) 175 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:00.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:00 vm06 bash[28114]: audit 2026-04-15T13:33:59.400620+0000 mon.vm06 (mon.0) 175 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:00.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:00 vm06 bash[28114]: audit 2026-04-15T13:33:59.538059+0000 mon.vm06 (mon.0) 176 : audit [DBG] from='client.? 192.168.123.109:0/4145423211' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:00.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:00 vm06 bash[28114]: audit 2026-04-15T13:33:59.538059+0000 mon.vm06 (mon.0) 176 : audit [DBG] from='client.? 192.168.123.109:0/4145423211' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:00.609 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
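Each dump in this stretch returns the same epoch-1 monmap: one monitor, vm06, advertising a v2 address on port 3300 and a v1 address on port 6789 inside public_addrs.addrvec. A sketch of pulling those addresses out of one of the JSON payloads above:

    import json

    # Trimmed to the fields used here; taken from one of the dumps above.
    dump_json = '''{"epoch":1,"mons":[{"rank":0,"name":"vm06",
      "public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},
      {"type":"v1","addr":"192.168.123.106:6789","nonce":0}]}}],"quorum":[0]}'''

    for mon in json.loads(dump_json)["mons"]:
        addrs = {a["type"]: a["addr"] for a in mon["public_addrs"]["addrvec"]}
        print(mon["rank"], mon["name"], addrs["v2"], addrs["v1"])
    # -> 0 vm06 192.168.123.106:3300 192.168.123.106:6789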
2026-04-15T13:34:00.609 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json 2026-04-15T13:34:00.875 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:34:00.875 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:34:01.329 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:34:01.329 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:32:39.407888Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-15T13:34:01.329 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-04-15T13:34:01.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:01 vm06 bash[28114]: audit 2026-04-15T13:34:00.286368+0000 mon.vm06 (mon.0) 177 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-04-15T13:34:01.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:01 vm06 bash[28114]: audit 2026-04-15T13:34:00.286368+0000 mon.vm06 (mon.0) 177 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished 2026-04-15T13:34:01.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:01 vm06 bash[28114]: cluster 2026-04-15T13:34:00.289751+0000 mon.vm06 (mon.0) 178 : cluster [DBG] mgrmap e14: vm06.qbbldl(active, since 31s) 2026-04-15T13:34:01.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:01 vm06 bash[28114]: cluster 2026-04-15T13:34:00.289751+0000 mon.vm06 (mon.0) 178 : cluster [DBG] mgrmap e14: vm06.qbbldl(active, since 31s) 2026-04-15T13:34:02.399 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-04-15T13:34:02.400 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json 2026-04-15T13:34:02.677 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:34:02.677 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:34:02.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:02 vm06 bash[28114]: audit 2026-04-15T13:34:01.329704+0000 mon.vm06 (mon.0) 179 : audit [DBG] from='client.? 
192.168.123.109:0/3298504752' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:02.773 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:02 vm06 bash[28114]: audit 2026-04-15T13:34:01.329704+0000 mon.vm06 (mon.0) 179 : audit [DBG] from='client.? 192.168.123.109:0/3298504752' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:03.146 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:34:03.146 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:32:39.407888Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-15T13:34:03.146 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-04-15T13:34:03.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:03 vm06 bash[28114]: audit 2026-04-15T13:34:03.147082+0000 mon.vm06 (mon.0) 180 : audit [DBG] from='client.? 192.168.123.109:0/298276015' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:03.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:03 vm06 bash[28114]: audit 2026-04-15T13:34:03.147082+0000 mon.vm06 (mon.0) 180 : audit [DBG] from='client.? 192.168.123.109:0/298276015' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:04.211 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
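Between these polls the orchestrator finished its own work: the audit entries above show "mgr module enable prometheus" dispatched and then finished, enabling the module that exposes mgr metrics for the prometheus.vm06 daemon deployed earlier. Issued by hand it is a single command; a sketch, assuming a ceph CLI with admin credentials (here that means going through cephadm shell as in the DEBUG lines):

    import subprocess

    # The command the mgr dispatched above, run manually.
    subprocess.run(["ceph", "mgr", "module", "enable", "prometheus"], check=True)
    # "ceph mgr module ls" should then show prometheus among the enabled modules.
    subprocess.run(["ceph", "mgr", "module", "ls"], check=True)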
2026-04-15T13:34:04.211 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json 2026-04-15T13:34:04.473 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:34:04.473 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:34:04.896 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-04-15T13:34:04.896 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:34:04.896 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:32:39.407888Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-15T13:34:05.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:04 vm06 bash[28114]: audit 2026-04-15T13:34:04.896848+0000 mon.vm06 (mon.0) 181 : audit [DBG] from='client.? 192.168.123.109:0/3641088732' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:05.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:04 vm06 bash[28114]: audit 2026-04-15T13:34:04.896848+0000 mon.vm06 (mon.0) 181 : audit [DBG] from='client.? 192.168.123.109:0/3641088732' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:05.963 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
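A readability note on this capture: every journalctl entry is emitted twice in a row, differing only in the harness timestamp at the start of the line. A small filter that collapses the repeats when reading the raw log, assuming one entry per line as in the original file:

    import sys

    prev = None
    for line in sys.stdin:
        body = line.split(" ", 1)[-1]  # drop the leading teuthology timestamp
        if body != prev:
            sys.stdout.write(line)
        prev = body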
2026-04-15T13:34:05.963 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json 2026-04-15T13:34:06.251 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:34:06.251 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:34:06.703 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-04-15T13:34:06.703 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:34:06.703 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:32:39.407888Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-15T13:34:07.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:06 vm06 bash[28114]: audit 2026-04-15T13:34:06.703399+0000 mon.vm06 (mon.0) 182 : audit [DBG] from='client.? 192.168.123.109:0/2786921762' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:07.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:06 vm06 bash[28114]: audit 2026-04-15T13:34:06.703399+0000 mon.vm06 (mon.0) 182 : audit [DBG] from='client.? 192.168.123.109:0/2786921762' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:07.782 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
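Each poll also shows up again on the mon's audit channel, with the dispatched command embedded as JSON after cmd=. A sketch that extracts and parses it from a single audit line; the sample line is abbreviated from the entries above:

    import json
    import re

    line = ("audit [DBG] from='client.?' entity='client.admin' "
            'cmd={"prefix": "mon dump", "format": "json"} : dispatch')
    m = re.search(r"cmd=(\{.*?\}) : dispatch", line)
    if m:
        print(json.loads(m.group(1))["prefix"])  # -> mon dump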
2026-04-15T13:34:07.782 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json 2026-04-15T13:34:08.056 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:34:08.056 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:34:08.466 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-04-15T13:34:08.467 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:34:08.467 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:32:39.407888Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-15T13:34:08.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:08 vm06 bash[28114]: cluster 2026-04-15T13:34:08.155573+0000 mon.vm06 (mon.0) 183 : cluster [INF] Active manager daemon vm06.qbbldl restarted 2026-04-15T13:34:08.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:08 vm06 bash[28114]: cluster 2026-04-15T13:34:08.155573+0000 mon.vm06 (mon.0) 183 : cluster [INF] Active manager daemon vm06.qbbldl restarted 2026-04-15T13:34:08.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:08 vm06 bash[28114]: cluster 2026-04-15T13:34:08.156049+0000 mon.vm06 (mon.0) 184 : cluster [INF] Activating manager daemon vm06.qbbldl 2026-04-15T13:34:08.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:08 vm06 bash[28114]: cluster 2026-04-15T13:34:08.156049+0000 mon.vm06 (mon.0) 184 : cluster [INF] Activating manager daemon vm06.qbbldl 2026-04-15T13:34:08.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:08 vm06 bash[28114]: cluster 2026-04-15T13:34:08.162261+0000 mon.vm06 (mon.0) 185 : cluster [DBG] osdmap e5: 0 total, 0 up, 0 in 2026-04-15T13:34:08.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:08 vm06 bash[28114]: cluster 2026-04-15T13:34:08.162261+0000 mon.vm06 (mon.0) 185 : cluster [DBG] osdmap e5: 0 total, 0 up, 0 in 2026-04-15T13:34:08.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:08 vm06 bash[28114]: cluster 2026-04-15T13:34:08.162450+0000 mon.vm06 (mon.0) 186 : cluster [DBG] mgrmap e15: vm06.qbbldl(active, starting, since 0.00652196s) 2026-04-15T13:34:08.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:08 vm06 bash[28114]: cluster 2026-04-15T13:34:08.162450+0000 mon.vm06 (mon.0) 186 : cluster [DBG] mgrmap e15: vm06.qbbldl(active, starting, since 0.00652196s) 2026-04-15T13:34:08.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:08 vm06 bash[28114]: audit 2026-04-15T13:34:08.164271+0000 mon.vm06 (mon.0) 187 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon 
metadata", "id": "vm06"} : dispatch 2026-04-15T13:34:08.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:08 vm06 bash[28114]: audit 2026-04-15T13:34:08.164271+0000 mon.vm06 (mon.0) 187 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm06"} : dispatch 2026-04-15T13:34:08.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:08 vm06 bash[28114]: audit 2026-04-15T13:34:08.164660+0000 mon.vm06 (mon.0) 188 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mgr metadata", "who": "vm06.qbbldl", "id": "vm06.qbbldl"} : dispatch 2026-04-15T13:34:08.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:08 vm06 bash[28114]: audit 2026-04-15T13:34:08.164660+0000 mon.vm06 (mon.0) 188 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mgr metadata", "who": "vm06.qbbldl", "id": "vm06.qbbldl"} : dispatch 2026-04-15T13:34:08.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:08 vm06 bash[28114]: audit 2026-04-15T13:34:08.165285+0000 mon.vm06 (mon.0) 189 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mds metadata"} : dispatch 2026-04-15T13:34:08.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:08 vm06 bash[28114]: audit 2026-04-15T13:34:08.165285+0000 mon.vm06 (mon.0) 189 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mds metadata"} : dispatch 2026-04-15T13:34:08.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:08 vm06 bash[28114]: audit 2026-04-15T13:34:08.165748+0000 mon.vm06 (mon.0) 190 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata"} : dispatch 2026-04-15T13:34:08.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:08 vm06 bash[28114]: audit 2026-04-15T13:34:08.165748+0000 mon.vm06 (mon.0) 190 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata"} : dispatch 2026-04-15T13:34:08.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:08 vm06 bash[28114]: audit 2026-04-15T13:34:08.166193+0000 mon.vm06 (mon.0) 191 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata"} : dispatch 2026-04-15T13:34:08.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:08 vm06 bash[28114]: audit 2026-04-15T13:34:08.166193+0000 mon.vm06 (mon.0) 191 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata"} : dispatch 2026-04-15T13:34:08.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:08 vm06 bash[28114]: cluster 2026-04-15T13:34:08.171293+0000 mon.vm06 (mon.0) 192 : cluster [INF] Manager daemon vm06.qbbldl is now available 2026-04-15T13:34:08.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:08 vm06 bash[28114]: cluster 2026-04-15T13:34:08.171293+0000 mon.vm06 (mon.0) 192 : cluster [INF] Manager daemon vm06.qbbldl is now available 2026-04-15T13:34:09.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:09 vm06 bash[28114]: audit 2026-04-15T13:34:08.437164+0000 mon.vm06 (mon.0) 193 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:34:09.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:09 vm06 bash[28114]: 
audit 2026-04-15T13:34:08.437164+0000 mon.vm06 (mon.0) 193 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:34:09.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:09 vm06 bash[28114]: audit 2026-04-15T13:34:08.446688+0000 mon.vm06 (mon.0) 194 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:09.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:09 vm06 bash[28114]: audit 2026-04-15T13:34:08.446688+0000 mon.vm06 (mon.0) 194 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:09.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:09 vm06 bash[28114]: audit 2026-04-15T13:34:08.467395+0000 mon.vm06 (mon.0) 195 : audit [DBG] from='client.? 192.168.123.109:0/1038886158' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:09.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:09 vm06 bash[28114]: audit 2026-04-15T13:34:08.467395+0000 mon.vm06 (mon.0) 195 : audit [DBG] from='client.? 192.168.123.109:0/1038886158' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:09.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:09 vm06 bash[28114]: audit 2026-04-15T13:34:08.479965+0000 mon.vm06 (mon.0) 196 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:34:09.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:09 vm06 bash[28114]: audit 2026-04-15T13:34:08.479965+0000 mon.vm06 (mon.0) 196 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:34:09.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:09 vm06 bash[28114]: audit 2026-04-15T13:34:08.480485+0000 mon.vm06 (mon.0) 197 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:34:09.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:09 vm06 bash[28114]: audit 2026-04-15T13:34:08.480485+0000 mon.vm06 (mon.0) 197 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:34:09.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:09 vm06 bash[28114]: audit 2026-04-15T13:34:08.481139+0000 mon.vm06 (mon.0) 198 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.qbbldl/mirror_snapshot_schedule"} : dispatch 2026-04-15T13:34:09.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:09 vm06 bash[28114]: audit 2026-04-15T13:34:08.481139+0000 mon.vm06 (mon.0) 198 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.qbbldl/mirror_snapshot_schedule"} : dispatch 2026-04-15T13:34:09.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:09 vm06 bash[28114]: audit 2026-04-15T13:34:08.517168+0000 mon.vm06 (mon.0) 199 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.qbbldl/trash_purge_schedule"} : 
dispatch 2026-04-15T13:34:09.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:09 vm06 bash[28114]: audit 2026-04-15T13:34:08.517168+0000 mon.vm06 (mon.0) 199 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.qbbldl/trash_purge_schedule"} : dispatch 2026-04-15T13:34:09.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:09 vm06 bash[28114]: audit 2026-04-15T13:34:09.018011+0000 mon.vm06 (mon.0) 200 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:09.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:09 vm06 bash[28114]: audit 2026-04-15T13:34:09.018011+0000 mon.vm06 (mon.0) 200 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:09.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:09 vm06 bash[28114]: cluster 2026-04-15T13:34:09.165654+0000 mon.vm06 (mon.0) 201 : cluster [DBG] mgrmap e16: vm06.qbbldl(active, since 1.00973s) 2026-04-15T13:34:09.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:09 vm06 bash[28114]: cluster 2026-04-15T13:34:09.165654+0000 mon.vm06 (mon.0) 201 : cluster [DBG] mgrmap e16: vm06.qbbldl(active, since 1.00973s) 2026-04-15T13:34:10.042 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-04-15T13:34:10.042 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json 2026-04-15T13:34:10.326 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:34:10.326 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:34:10.743 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-04-15T13:34:10.743 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:34:10.743 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:32:39.407888Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-15T13:34:10.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:10 vm06 bash[28114]: cephadm 2026-04-15T13:34:08.933218+0000 mgr.vm06.qbbldl (mgr.14229) 1 : cephadm [INF] [15/Apr/2026:13:34:08] ENGINE Bus STARTING 2026-04-15T13:34:10.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:10 vm06 bash[28114]: cephadm 2026-04-15T13:34:08.933218+0000 mgr.vm06.qbbldl (mgr.14229) 1 : cephadm [INF] [15/Apr/2026:13:34:08] ENGINE Bus STARTING 2026-04-15T13:34:10.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:10 vm06 bash[28114]: cephadm 2026-04-15T13:34:09.036619+0000 mgr.vm06.qbbldl (mgr.14229) 2 : cephadm [INF] 
[15/Apr/2026:13:34:09] ENGINE Serving on http://192.168.123.106:8765 2026-04-15T13:34:10.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:10 vm06 bash[28114]: cephadm 2026-04-15T13:34:09.036619+0000 mgr.vm06.qbbldl (mgr.14229) 2 : cephadm [INF] [15/Apr/2026:13:34:09] ENGINE Serving on http://192.168.123.106:8765 2026-04-15T13:34:10.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:10 vm06 bash[28114]: cephadm 2026-04-15T13:34:09.146285+0000 mgr.vm06.qbbldl (mgr.14229) 3 : cephadm [INF] [15/Apr/2026:13:34:09] ENGINE Serving on https://192.168.123.106:7150 2026-04-15T13:34:10.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:10 vm06 bash[28114]: cephadm 2026-04-15T13:34:09.146285+0000 mgr.vm06.qbbldl (mgr.14229) 3 : cephadm [INF] [15/Apr/2026:13:34:09] ENGINE Serving on https://192.168.123.106:7150 2026-04-15T13:34:10.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:10 vm06 bash[28114]: cephadm 2026-04-15T13:34:09.146434+0000 mgr.vm06.qbbldl (mgr.14229) 4 : cephadm [INF] [15/Apr/2026:13:34:09] ENGINE Bus STARTED 2026-04-15T13:34:10.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:10 vm06 bash[28114]: cephadm 2026-04-15T13:34:09.146434+0000 mgr.vm06.qbbldl (mgr.14229) 4 : cephadm [INF] [15/Apr/2026:13:34:09] ENGINE Bus STARTED 2026-04-15T13:34:10.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:10 vm06 bash[28114]: cephadm 2026-04-15T13:34:09.146656+0000 mgr.vm06.qbbldl (mgr.14229) 5 : cephadm [INF] [15/Apr/2026:13:34:09] ENGINE Client ('192.168.123.106', 38848) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-04-15T13:34:10.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:10 vm06 bash[28114]: cephadm 2026-04-15T13:34:09.146656+0000 mgr.vm06.qbbldl (mgr.14229) 5 : cephadm [INF] [15/Apr/2026:13:34:09] ENGINE Client ('192.168.123.106', 38848) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)') 2026-04-15T13:34:10.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:10 vm06 bash[28114]: audit 2026-04-15T13:34:09.166432+0000 mgr.vm06.qbbldl (mgr.14229) 6 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T13:34:10.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:10 vm06 bash[28114]: audit 2026-04-15T13:34:09.166432+0000 mgr.vm06.qbbldl (mgr.14229) 6 : audit [DBG] from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T13:34:10.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:10 vm06 bash[28114]: audit 2026-04-15T13:34:09.305554+0000 mon.vm06 (mon.0) 202 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:10.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:10 vm06 bash[28114]: audit 2026-04-15T13:34:09.305554+0000 mon.vm06 (mon.0) 202 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:10.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:10 vm06 bash[28114]: audit 2026-04-15T13:34:09.930995+0000 mon.vm06 (mon.0) 203 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:10.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:10 vm06 bash[28114]: audit 2026-04-15T13:34:09.930995+0000 mon.vm06 (mon.0) 203 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:11.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:11 vm06 bash[28114]: audit 2026-04-15T13:34:10.744045+0000 mon.vm06 (mon.0) 204 : audit [DBG] from='client.? 192.168.123.109:0/326797789' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:11.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:11 vm06 bash[28114]: audit 2026-04-15T13:34:10.744045+0000 mon.vm06 (mon.0) 204 : audit [DBG] from='client.? 192.168.123.109:0/326797789' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:11.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:11 vm06 bash[28114]: cluster 2026-04-15T13:34:10.934395+0000 mon.vm06 (mon.0) 205 : cluster [DBG] mgrmap e17: vm06.qbbldl(active, since 2s) 2026-04-15T13:34:11.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:11 vm06 bash[28114]: cluster 2026-04-15T13:34:10.934395+0000 mon.vm06 (mon.0) 205 : cluster [DBG] mgrmap e17: vm06.qbbldl(active, since 2s) 2026-04-15T13:34:11.826 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 
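After the module enable the mgr restarted and its cephadm server came back on two endpoints, plain HTTP on 8765 and HTTPS on 7150 (the ENGINE "Serving on" lines above; the single TLS "connection has been closed (EOF)" entry is just a client dropping mid-handshake). A minimal reachability probe, assuming the mgr host is routable from wherever this runs:

    import socket

    # Ports taken from the ENGINE "Serving on" lines above.
    for port in (8765, 7150):
        with socket.create_connection(("192.168.123.106", port), timeout=5):
            print(f"port {port} is accepting connections")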
2026-04-15T13:34:11.826 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json 2026-04-15T13:34:12.094 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:34:12.094 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:34:12.524 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:34:12.524 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:32:39.407888Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-15T13:34:12.525 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-04-15T13:34:13.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:12 vm06 bash[28114]: audit 2026-04-15T13:34:11.739976+0000 mon.vm06 (mon.0) 206 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:13.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:12 vm06 bash[28114]: audit 2026-04-15T13:34:11.739976+0000 mon.vm06 (mon.0) 206 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:13.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:12 vm06 bash[28114]: audit 2026-04-15T13:34:11.742549+0000 mon.vm06 (mon.0) 207 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:13.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:12 vm06 bash[28114]: audit 2026-04-15T13:34:11.742549+0000 mon.vm06 (mon.0) 207 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:13.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:12 vm06 bash[28114]: audit 2026-04-15T13:34:11.745614+0000 mon.vm06 (mon.0) 208 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:13.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:12 vm06 bash[28114]: audit 2026-04-15T13:34:11.745614+0000 mon.vm06 (mon.0) 208 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:13.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:12 vm06 bash[28114]: audit 2026-04-15T13:34:11.748135+0000 mon.vm06 (mon.0) 209 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:13.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:12 vm06 bash[28114]: audit 2026-04-15T13:34:11.748135+0000 mon.vm06 (mon.0) 209 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:13.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:12 
vm06 bash[28114]: audit 2026-04-15T13:34:11.748873+0000 mon.vm06 (mon.0) 210 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"} : dispatch 2026-04-15T13:34:13.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:12 vm06 bash[28114]: audit 2026-04-15T13:34:11.748873+0000 mon.vm06 (mon.0) 210 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"} : dispatch 2026-04-15T13:34:13.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:12 vm06 bash[28114]: audit 2026-04-15T13:34:12.525173+0000 mon.vm06 (mon.0) 211 : audit [DBG] from='client.? 192.168.123.109:0/2958605695' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:13.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:12 vm06 bash[28114]: audit 2026-04-15T13:34:12.525173+0000 mon.vm06 (mon.0) 211 : audit [DBG] from='client.? 192.168.123.109:0/2958605695' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:13.614 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-04-15T13:34:13.614 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json 2026-04-15T13:34:13.904 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:34:13.904 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /etc/ceph/ceph.conf 2026-04-15T13:34:14.346 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:34:14.346 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:32:39.407888Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-15T13:34:14.346 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:13.707249+0000 mon.vm06 (mon.0) 212 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:13.707249+0000 mon.vm06 (mon.0) 212 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:13.710910+0000 mon.vm06 (mon.0) 213 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:15.012 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:13.710910+0000 mon.vm06 (mon.0) 213 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.346943+0000 mon.vm06 (mon.0) 214 : audit [DBG] from='client.? 192.168.123.109:0/1627993379' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.346943+0000 mon.vm06 (mon.0) 214 : audit [DBG] from='client.? 192.168.123.109:0/1627993379' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.372967+0000 mon.vm06 (mon.0) 215 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.372967+0000 mon.vm06 (mon.0) 215 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.376414+0000 mon.vm06 (mon.0) 216 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.376414+0000 mon.vm06 (mon.0) 216 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.377395+0000 mon.vm06 (mon.0) 217 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"} : dispatch 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.377395+0000 mon.vm06 (mon.0) 217 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"} : dispatch 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.378157+0000 mon.vm06 (mon.0) 218 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.378157+0000 mon.vm06 (mon.0) 218 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.378617+0000 mon.vm06 (mon.0) 219 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.378617+0000 mon.vm06 (mon.0) 219 : audit [INF] 
from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.553198+0000 mon.vm06 (mon.0) 220 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.553198+0000 mon.vm06 (mon.0) 220 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.555693+0000 mon.vm06 (mon.0) 221 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.555693+0000 mon.vm06 (mon.0) 221 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.561546+0000 mon.vm06 (mon.0) 222 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:15.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.561546+0000 mon.vm06 (mon.0) 222 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:15.013 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.564437+0000 mon.vm06 (mon.0) 223 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:15.013 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.564437+0000 mon.vm06 (mon.0) 223 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:15.013 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.567107+0000 mon.vm06 (mon.0) 224 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:15.013 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.567107+0000 mon.vm06 (mon.0) 224 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:15.013 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.568139+0000 mon.vm06 (mon.0) 225 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch 2026-04-15T13:34:15.013 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.568139+0000 mon.vm06 (mon.0) 225 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch 2026-04-15T13:34:15.013 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 
2026-04-15T13:34:14.569138+0000 mon.vm06 (mon.0) 226 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished 2026-04-15T13:34:15.013 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.569138+0000 mon.vm06 (mon.0) 226 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished 2026-04-15T13:34:15.013 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.570134+0000 mon.vm06 (mon.0) 227 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:15.013 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:14 vm06 bash[28114]: audit 2026-04-15T13:34:14.570134+0000 mon.vm06 (mon.0) 227 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:15.436 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-04-15T13:34:15.436 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json 2026-04-15T13:34:15.719 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.conf 2026-04-15T13:34:16.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:15 vm06 bash[28114]: cephadm 2026-04-15T13:34:14.379336+0000 mgr.vm06.qbbldl (mgr.14229) 7 : cephadm [INF] Updating vm06:/etc/ceph/ceph.conf 2026-04-15T13:34:16.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:15 vm06 bash[28114]: cephadm 2026-04-15T13:34:14.379336+0000 mgr.vm06.qbbldl (mgr.14229) 7 : cephadm [INF] Updating vm06:/etc/ceph/ceph.conf 2026-04-15T13:34:16.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:15 vm06 bash[28114]: cephadm 2026-04-15T13:34:14.379440+0000 mgr.vm06.qbbldl (mgr.14229) 8 : cephadm [INF] Updating vm09:/etc/ceph/ceph.conf 2026-04-15T13:34:16.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:15 vm06 bash[28114]: cephadm 2026-04-15T13:34:14.379440+0000 mgr.vm06.qbbldl (mgr.14229) 8 : cephadm [INF] Updating vm09:/etc/ceph/ceph.conf 2026-04-15T13:34:16.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:15 vm06 bash[28114]: cephadm 2026-04-15T13:34:14.424032+0000 mgr.vm06.qbbldl (mgr.14229) 9 : cephadm [INF] Updating vm06:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.conf 2026-04-15T13:34:16.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:15 vm06 bash[28114]: cephadm 2026-04-15T13:34:14.424032+0000 mgr.vm06.qbbldl (mgr.14229) 9 : cephadm [INF] Updating vm06:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.conf 2026-04-15T13:34:16.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:15 vm06 bash[28114]: cephadm 2026-04-15T13:34:14.426972+0000 mgr.vm06.qbbldl (mgr.14229) 10 : cephadm [INF] Updating 
vm09:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.conf 2026-04-15T13:34:16.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:15 vm06 bash[28114]: cephadm 2026-04-15T13:34:14.426972+0000 mgr.vm06.qbbldl (mgr.14229) 10 : cephadm [INF] Updating vm09:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.conf 2026-04-15T13:34:16.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:15 vm06 bash[28114]: cephadm 2026-04-15T13:34:14.468621+0000 mgr.vm06.qbbldl (mgr.14229) 11 : cephadm [INF] Updating vm09:/etc/ceph/ceph.client.admin.keyring 2026-04-15T13:34:16.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:15 vm06 bash[28114]: cephadm 2026-04-15T13:34:14.468621+0000 mgr.vm06.qbbldl (mgr.14229) 11 : cephadm [INF] Updating vm09:/etc/ceph/ceph.client.admin.keyring 2026-04-15T13:34:16.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:15 vm06 bash[28114]: cephadm 2026-04-15T13:34:14.471902+0000 mgr.vm06.qbbldl (mgr.14229) 12 : cephadm [INF] Updating vm06:/etc/ceph/ceph.client.admin.keyring 2026-04-15T13:34:16.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:15 vm06 bash[28114]: cephadm 2026-04-15T13:34:14.471902+0000 mgr.vm06.qbbldl (mgr.14229) 12 : cephadm [INF] Updating vm06:/etc/ceph/ceph.client.admin.keyring 2026-04-15T13:34:16.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:15 vm06 bash[28114]: cephadm 2026-04-15T13:34:14.509540+0000 mgr.vm06.qbbldl (mgr.14229) 13 : cephadm [INF] Updating vm09:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.client.admin.keyring 2026-04-15T13:34:16.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:15 vm06 bash[28114]: cephadm 2026-04-15T13:34:14.509540+0000 mgr.vm06.qbbldl (mgr.14229) 13 : cephadm [INF] Updating vm09:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.client.admin.keyring 2026-04-15T13:34:16.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:15 vm06 bash[28114]: cephadm 2026-04-15T13:34:14.516516+0000 mgr.vm06.qbbldl (mgr.14229) 14 : cephadm [INF] Updating vm06:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.client.admin.keyring 2026-04-15T13:34:16.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:15 vm06 bash[28114]: cephadm 2026-04-15T13:34:14.516516+0000 mgr.vm06.qbbldl (mgr.14229) 14 : cephadm [INF] Updating vm06:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.client.admin.keyring 2026-04-15T13:34:16.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:15 vm06 bash[28114]: cephadm 2026-04-15T13:34:14.570712+0000 mgr.vm06.qbbldl (mgr.14229) 15 : cephadm [INF] Deploying daemon ceph-exporter.vm09 on vm09 2026-04-15T13:34:16.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:15 vm06 bash[28114]: cephadm 2026-04-15T13:34:14.570712+0000 mgr.vm06.qbbldl (mgr.14229) 15 : cephadm [INF] Deploying daemon ceph-exporter.vm09 on vm09 2026-04-15T13:34:16.372 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:34:16.372 
INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:32:39.407888Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-15T13:34:16.372 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-04-15T13:34:17.471 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-04-15T13:34:17.472 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json 2026-04-15T13:34:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.035745+0000 mon.vm06 (mon.0) 228 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.035745+0000 mon.vm06 (mon.0) 228 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.038393+0000 mon.vm06 (mon.0) 229 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.038393+0000 mon.vm06 (mon.0) 229 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.040822+0000 mon.vm06 (mon.0) 230 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.040822+0000 mon.vm06 (mon.0) 230 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.043079+0000 mon.vm06 (mon.0) 231 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.043079+0000 mon.vm06 (mon.0) 231 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.044151+0000 mon.vm06 (mon.0) 232 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": 
"client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch 2026-04-15T13:34:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.044151+0000 mon.vm06 (mon.0) 232 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch 2026-04-15T13:34:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.045295+0000 mon.vm06 (mon.0) 233 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-04-15T13:34:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.045295+0000 mon.vm06 (mon.0) 233 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-04-15T13:34:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.046723+0000 mon.vm06 (mon.0) 234 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.046723+0000 mon.vm06 (mon.0) 234 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: cephadm 2026-04-15T13:34:16.047283+0000 mgr.vm06.qbbldl (mgr.14229) 16 : cephadm [INF] Deploying daemon crash.vm09 on vm09 2026-04-15T13:34:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: cephadm 2026-04-15T13:34:16.047283+0000 mgr.vm06.qbbldl (mgr.14229) 16 : cephadm [INF] Deploying daemon crash.vm09 on vm09 2026-04-15T13:34:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.370955+0000 mon.vm06 (mon.0) 235 : audit [DBG] from='client.? 192.168.123.109:0/3202495841' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.370955+0000 mon.vm06 (mon.0) 235 : audit [DBG] from='client.? 
192.168.123.109:0/3202495841' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:17.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.914255+0000 mon.vm06 (mon.0) 236 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:17.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.914255+0000 mon.vm06 (mon.0) 236 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:17.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.917007+0000 mon.vm06 (mon.0) 237 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:17.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.917007+0000 mon.vm06 (mon.0) 237 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:17.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.919289+0000 mon.vm06 (mon.0) 238 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:17.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.919289+0000 mon.vm06 (mon.0) 238 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:17.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.921317+0000 mon.vm06 (mon.0) 239 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:17.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:17 vm06 bash[28114]: audit 2026-04-15T13:34:16.921317+0000 mon.vm06 (mon.0) 239 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:17.728 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.conf 2026-04-15T13:34:18.187 INFO:teuthology.orchestra.run.vm09.stdout: 2026-04-15T13:34:18.187 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":1,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:32:39.407888Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0]} 2026-04-15T13:34:18.187 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 1 2026-04-15T13:34:18.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:18 vm06 bash[28114]: cephadm 2026-04-15T13:34:16.922243+0000 mgr.vm06.qbbldl (mgr.14229) 17 : cephadm [INF] Deploying daemon node-exporter.vm09 on vm09 2026-04-15T13:34:18.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:18 vm06 bash[28114]: cephadm 
2026-04-15T13:34:16.922243+0000 mgr.vm06.qbbldl (mgr.14229) 17 : cephadm [INF] Deploying daemon node-exporter.vm09 on vm09 2026-04-15T13:34:18.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:18 vm06 bash[28114]: audit 2026-04-15T13:34:17.636783+0000 mon.vm06 (mon.0) 240 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:18.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:18 vm06 bash[28114]: audit 2026-04-15T13:34:17.636783+0000 mon.vm06 (mon.0) 240 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:18.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:18 vm06 bash[28114]: audit 2026-04-15T13:34:17.639831+0000 mon.vm06 (mon.0) 241 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:18.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:18 vm06 bash[28114]: audit 2026-04-15T13:34:17.639831+0000 mon.vm06 (mon.0) 241 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:18.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:18 vm06 bash[28114]: audit 2026-04-15T13:34:17.642517+0000 mon.vm06 (mon.0) 242 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:18.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:18 vm06 bash[28114]: audit 2026-04-15T13:34:17.642517+0000 mon.vm06 (mon.0) 242 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:18.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:18 vm06 bash[28114]: audit 2026-04-15T13:34:17.644566+0000 mon.vm06 (mon.0) 243 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:18.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:18 vm06 bash[28114]: audit 2026-04-15T13:34:17.644566+0000 mon.vm06 (mon.0) 243 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:18.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:18 vm06 bash[28114]: audit 2026-04-15T13:34:17.645932+0000 mon.vm06 (mon.0) 244 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm09.kpawde", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch 2026-04-15T13:34:18.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:18 vm06 bash[28114]: audit 2026-04-15T13:34:17.645932+0000 mon.vm06 (mon.0) 244 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm09.kpawde", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch 2026-04-15T13:34:18.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:18 vm06 bash[28114]: audit 2026-04-15T13:34:17.647170+0000 mon.vm06 (mon.0) 245 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm09.kpawde", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-04-15T13:34:18.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:18 vm06 bash[28114]: audit 2026-04-15T13:34:17.647170+0000 mon.vm06 (mon.0) 245 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": 
"mgr.vm09.kpawde", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-04-15T13:34:18.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:18 vm06 bash[28114]: audit 2026-04-15T13:34:17.648296+0000 mon.vm06 (mon.0) 246 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mgr services"} : dispatch 2026-04-15T13:34:18.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:18 vm06 bash[28114]: audit 2026-04-15T13:34:17.648296+0000 mon.vm06 (mon.0) 246 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mgr services"} : dispatch 2026-04-15T13:34:18.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:18 vm06 bash[28114]: audit 2026-04-15T13:34:17.648846+0000 mon.vm06 (mon.0) 247 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:18.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:18 vm06 bash[28114]: audit 2026-04-15T13:34:17.648846+0000 mon.vm06 (mon.0) 247 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:18.753 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:18 vm09 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-04-15T13:34:19.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:19 vm06 bash[28114]: cephadm 2026-04-15T13:34:17.649480+0000 mgr.vm06.qbbldl (mgr.14229) 18 : cephadm [INF] Deploying daemon mgr.vm09.kpawde on vm09 2026-04-15T13:34:19.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:19 vm06 bash[28114]: cephadm 2026-04-15T13:34:17.649480+0000 mgr.vm06.qbbldl (mgr.14229) 18 : cephadm [INF] Deploying daemon mgr.vm09.kpawde on vm09 2026-04-15T13:34:19.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:19 vm06 bash[28114]: audit 2026-04-15T13:34:18.187869+0000 mon.vm06 (mon.0) 248 : audit [DBG] from='client.? 192.168.123.109:0/1309726075' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:19.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:19 vm06 bash[28114]: audit 2026-04-15T13:34:18.187869+0000 mon.vm06 (mon.0) 248 : audit [DBG] from='client.? 
192.168.123.109:0/1309726075' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:19.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:19 vm06 bash[28114]: audit 2026-04-15T13:34:18.451836+0000 mon.vm06 (mon.0) 249 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:19 vm06 bash[28114]: audit 2026-04-15T13:34:18.451836+0000 mon.vm06 (mon.0) 249 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:19 vm06 bash[28114]: audit 2026-04-15T13:34:18.480964+0000 mon.vm06 (mon.0) 250 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:19 vm06 bash[28114]: audit 2026-04-15T13:34:18.480964+0000 mon.vm06 (mon.0) 250 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:19 vm06 bash[28114]: audit 2026-04-15T13:34:18.483721+0000 mon.vm06 (mon.0) 251 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:19 vm06 bash[28114]: audit 2026-04-15T13:34:18.483721+0000 mon.vm06 (mon.0) 251 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:19 vm06 bash[28114]: audit 2026-04-15T13:34:18.486486+0000 mon.vm06 (mon.0) 252 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:19 vm06 bash[28114]: audit 2026-04-15T13:34:18.486486+0000 mon.vm06 (mon.0) 252 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:19 vm06 bash[28114]: audit 2026-04-15T13:34:18.488705+0000 mon.vm06 (mon.0) 253 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:19 vm06 bash[28114]: audit 2026-04-15T13:34:18.488705+0000 mon.vm06 (mon.0) 253 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:19 vm06 bash[28114]: audit 2026-04-15T13:34:18.489578+0000 mon.vm06 (mon.0) 254 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "mon."} : dispatch 2026-04-15T13:34:19.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:19 vm06 bash[28114]: audit 2026-04-15T13:34:18.489578+0000 mon.vm06 (mon.0) 254 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "mon."} : dispatch 2026-04-15T13:34:19.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:19 vm06 bash[28114]: audit 2026-04-15T13:34:18.490068+0000 mon.vm06 (mon.0) 255 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:19.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:19 vm06 bash[28114]: 
audit 2026-04-15T13:34:18.490068+0000 mon.vm06 (mon.0) 255 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:19.306 INFO:tasks.cephadm:Waiting for 2 mons in monmap... 2026-04-15T13:34:19.307 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mon dump -f json 2026-04-15T13:34:19.314 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-04-15T13:34:19.314 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-04-15T13:34:19.315 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-04-15T13:34:19.578 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-04-15T13:34:19.578 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 systemd[1]: Started Ceph mon.vm09 for 75e42418-38cf-11f1-9300-4fe77ac4445b. 
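The repeated "Waiting for 2 mons in monmap..." lines above are the cephadm task polling the cluster until mon.vm09 joins mon.vm06 in the monmap: each iteration shells into the bootstrap container and counts the "mons" array returned by `ceph mon dump -f json`. Below is a minimal bash sketch of that loop, assuming python3 is available on the host; the real implementation is the Python tasks.cephadm code under qa/tasks, and the FSID and image are the ones from this run.

    # Sketch only: poll the monmap until it reports at least 2 mons.
    FSID=75e42418-38cf-11f1-9300-4fe77ac4445b
    IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5
    until [ "$(sudo /home/ubuntu/cephtest/cephadm --image "$IMAGE" shell \
                 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
                 --fsid "$FSID" -- ceph mon dump -f json \
               | python3 -c 'import json,sys; print(len(json.load(sys.stdin)["mons"]))')" -ge 2 ]; do
        sleep 2   # each iteration corresponds to one 'Waiting for 2 mons in monmap...' line above
    done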
2026-04-15T13:34:19.618 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm09/config 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 0 set uid:gid to 167:167 (ceph:ceph) 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 0 ceph version 20.2.0-19-g7ec4401a095 (7ec4401a095f03c389fcf6df60e966f86395fb86) tentacle (stable - RelWithDebInfo), process ceph-mon, pid 7 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 0 pidfile_write: ignore empty --pid-file 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 0 load: jerasure load: lrc 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: RocksDB version: 7.9.2 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Git sha 0 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Compile date 2026-04-14 11:30:02 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: DB SUMMARY 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: DB Session ID: XFIOFXOHLGX4OAEKNF6X 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: CURRENT file: CURRENT 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: IDENTITY file: IDENTITY 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: MANIFEST file: MANIFEST-000005 size: 59 Bytes 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: SST files in /var/lib/ceph/mon/ceph-vm09/store.db dir, Total Num: 0, files: 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Write Ahead Log file in /var/lib/ceph/mon/ceph-vm09/store.db: 000004.log size: 511 ; 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.error_if_exists: 0 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.create_if_missing: 0 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.paranoid_checks: 1 2026-04-15T13:34:19.889 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.flush_verify_memtable_count: 1 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.track_and_verify_wals_in_manifest: 0 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.verify_sst_unique_id_in_manifest: 1 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.env: 0x55a764c1a440 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.fs: PosixFileSystem 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.info_log: 0x55a77879cf00 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_file_opening_threads: 16 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.statistics: (nil) 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.use_fsync: 0 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_log_file_size: 0 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_manifest_file_size: 1073741824 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.log_file_time_to_roll: 0 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.keep_log_file_num: 1000 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.recycle_log_file_num: 0 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.allow_fallocate: 1 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.allow_mmap_reads: 0 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.allow_mmap_writes: 0 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.use_direct_reads: 0 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 
7fa3dfeacd40 4 rocksdb: Options.use_direct_io_for_flush_and_compaction: 0 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.create_missing_column_families: 0 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.db_log_dir: 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.wal_dir: 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.table_cache_numshardbits: 6 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.WAL_ttl_seconds: 0 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.WAL_size_limit_MB: 0 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_write_batch_group_size_bytes: 1048576 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.manifest_preallocation_size: 4194304 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.is_fd_close_on_exec: 1 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.advise_random_on_open: 1 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.db_write_buffer_size: 0 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.write_buffer_manager: 0x55a7787a0500 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.access_hint_on_compaction_start: 1 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.random_access_max_buffer_size: 1048576 2026-04-15T13:34:19.889 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.use_adaptive_mutex: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.rate_limiter: (nil) 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.sst_file_manager.rate_bytes_per_sec: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: 
Options.wal_recovery_mode: 2 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.enable_thread_tracking: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.enable_pipelined_write: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.unordered_write: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.allow_concurrent_memtable_write: 1 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.enable_write_thread_adaptive_yield: 1 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.write_thread_max_yield_usec: 100 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.write_thread_slow_yield_usec: 3 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.row_cache: None 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.wal_filter: None 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.avoid_flush_during_recovery: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.allow_ingest_behind: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.two_write_queues: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.manual_wal_flush: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.wal_compression: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.atomic_flush: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.avoid_unnecessary_blocking_io: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.persist_stats_to_disk: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.write_dbid_to_manifest: 0 2026-04-15T13:34:19.890 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.log_readahead_size: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.file_checksum_gen_factory: Unknown 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.best_efforts_recovery: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_bgerror_resume_count: 2147483647 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.bgerror_resume_retry_interval: 1000000 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.allow_data_in_errors: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.db_host_id: __hostname__ 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.enforce_single_del_contracts: true 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_background_jobs: 2 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_background_compactions: -1 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_subcompactions: 1 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.avoid_flush_during_shutdown: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.writable_file_max_buffer_size: 1048576 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.delayed_write_rate : 16777216 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_total_wal_size: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.delete_obsolete_files_period_micros: 21600000000 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.stats_dump_period_sec: 600 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.stats_persist_period_sec: 600 2026-04-15T13:34:19.890 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.stats_history_buffer_size: 1048576 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_open_files: -1 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.bytes_per_sync: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.wal_bytes_per_sync: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.strict_bytes_per_sync: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compaction_readahead_size: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_background_flushes: -1 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Compression algorithms supported: 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: kZSTD supported: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: kXpressCompression supported: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: kBZip2Compression supported: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: kZSTDNotFinalCompression supported: 0 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: kLZ4Compression supported: 1 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: kZlibCompression supported: 1 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: kLZ4HCCompression supported: 1 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: kSnappyCompression supported: 1 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Fast CRC32 supported: Supported on x86 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: DMutex implementation: pthread_mutex_t 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 
2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: [db/version_set.cc:5527] Recovering from manifest file: /var/lib/ceph/mon/ceph-vm09/store.db/MANIFEST-000005 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: [db/column_family.cc:630] --------------- Options for column family [default]: 2026-04-15T13:34:19.890 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.comparator: leveldb.BytewiseComparator 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.merge_operator: 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compaction_filter: None 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compaction_filter_factory: None 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.sst_partitioner_factory: None 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.memtable_factory: SkipListFactory 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.table_factory: BlockBasedTable 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: table_factory options: flush_block_policy_factory: FlushBlockBySizePolicyFactory (0x55a77879c440) 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cache_index_and_filter_blocks: 1 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cache_index_and_filter_blocks_with_high_priority: 0 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: pin_l0_filter_and_index_blocks_in_cache: 0 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: pin_top_level_index_and_filter: 1 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: index_type: 0 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: data_block_index_type: 0 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: index_shortening: 1 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: data_block_hash_table_util_ratio: 0.750000 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: checksum: 4 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: no_block_cache: 0 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: block_cache: 0x55a7787938d0 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: 
block_cache_name: BinnedLRUCache 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: block_cache_options: 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: capacity : 536870912 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: num_shard_bits : 4 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: strict_capacity_limit : 0 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: high_pri_pool_ratio: 0.000 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: block_cache_compressed: (nil) 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: persistent_cache: (nil) 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: block_size: 4096 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: block_size_deviation: 10 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: block_restart_interval: 16 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: index_block_restart_interval: 1 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: metadata_block_size: 4096 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: partition_filters: 0 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: use_delta_encoding: 1 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: filter_policy: bloomfilter 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: whole_key_filtering: 1 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: verify_compression: 0 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: read_amp_bytes_per_bit: 0 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: format_version: 5 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: enable_index_compression: 1 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: block_align: 0 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: max_auto_readahead_size: 262144 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: prepopulate_block_cache: 0 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: initial_auto_readahead_size: 8192 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: num_file_reads_for_auto_readahead: 2 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.write_buffer_size: 33554432 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_write_buffer_number: 2 
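The options dump above and below shows the mon's RocksDB store being opened with Ceph's usual monitor tuning: a BlockBasedTable factory with a 512 MB LRU block cache (BinnedLRUCache is Ceph's own cache implementation), a bloom filter with whole-key filtering, 4 KB blocks at format_version 5, 32 MB write buffers, and compression disabled. As a rough, illustrative sketch only, roughly the same column-family configuration can be expressed against the stock RocksDB C++ API as below. Ceph itself assembles these options internally (apparently by parsing its mon_rocksdb_options string) rather than from code like this; NewLRUCache stands in for the BinnedLRUCache shown in the dump, and the path here is hypothetical.

#include <cassert>
#include <string>
#include <rocksdb/cache.h>
#include <rocksdb/db.h>
#include <rocksdb/filter_policy.h>
#include <rocksdb/options.h>
#include <rocksdb/table.h>

int main() {
  // Hypothetical path; the dump above is for /var/lib/ceph/mon/ceph-vm09/store.db.
  const std::string kPath = "/tmp/mon-store-sketch";

  rocksdb::BlockBasedTableOptions t;
  t.block_cache = rocksdb::NewLRUCache(512 * 1024 * 1024);   // capacity : 536870912 (stand-in for BinnedLRUCache)
  t.cache_index_and_filter_blocks = true;                    // cache_index_and_filter_blocks: 1
  t.pin_top_level_index_and_filter = true;                   // pin_top_level_index_and_filter: 1
  t.block_size = 4096;                                       // block_size: 4096
  t.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10));  // filter_policy: bloomfilter
  t.whole_key_filtering = true;                              // whole_key_filtering: 1
  t.format_version = 5;                                      // format_version: 5

  rocksdb::Options o;
  o.create_if_missing = true;
  o.table_factory.reset(rocksdb::NewBlockBasedTableFactory(t));
  o.write_buffer_size = 33554432;                 // Options.write_buffer_size: 33554432
  o.max_write_buffer_number = 2;                  // Options.max_write_buffer_number: 2
  o.compression = rocksdb::kNoCompression;        // Options.compression: NoCompression
  o.num_levels = 7;                               // Options.num_levels: 7
  o.level0_file_num_compaction_trigger = 4;       // Options.level0_file_num_compaction_trigger: 4
  o.target_file_size_base = 67108864;             // Options.target_file_size_base: 67108864
  o.max_bytes_for_level_base = 268435456;         // Options.max_bytes_for_level_base: 268435456
  o.level_compaction_dynamic_level_bytes = true;  // Options.level_compaction_dynamic_level_bytes: 1
  o.force_consistency_checks = true;              // Options.force_consistency_checks: 1

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(o, kPath, &db);
  assert(s.ok());
  delete db;
  return 0;
}

Compression stays off and the write buffers stay small because the mon store is a small, latency-sensitive key-value set; the values above mirror the dump in this log rather than RocksDB's stock defaults.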
2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compression: NoCompression 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.bottommost_compression: Disabled 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.prefix_extractor: nullptr 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.memtable_insert_with_hint_prefix_extractor: nullptr 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.num_levels: 7 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.min_write_buffer_number_to_merge: 1 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_write_buffer_number_to_maintain: 0 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_write_buffer_size_to_maintain: 0 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.bottommost_compression_opts.window_bits: -14 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.bottommost_compression_opts.level: 32767 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.bottommost_compression_opts.strategy: 0 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.bottommost_compression_opts.max_dict_bytes: 0 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.bottommost_compression_opts.zstd_max_train_bytes: 0 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.bottommost_compression_opts.parallel_threads: 1 2026-04-15T13:34:19.891 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.bottommost_compression_opts.enabled: false 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.bottommost_compression_opts.max_dict_buffer_bytes: 0 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.bottommost_compression_opts.use_zstd_dict_trainer: true 2026-04-15T13:34:19.892 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compression_opts.window_bits: -14 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compression_opts.level: 32767 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compression_opts.strategy: 0 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compression_opts.max_dict_bytes: 0 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compression_opts.zstd_max_train_bytes: 0 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compression_opts.use_zstd_dict_trainer: true 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compression_opts.parallel_threads: 1 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compression_opts.enabled: false 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compression_opts.max_dict_buffer_bytes: 0 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.level0_file_num_compaction_trigger: 4 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.level0_slowdown_writes_trigger: 20 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.level0_stop_writes_trigger: 36 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.target_file_size_base: 67108864 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.target_file_size_multiplier: 1 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_bytes_for_level_base: 268435456 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.level_compaction_dynamic_level_bytes: 1 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_bytes_for_level_multiplier: 10.000000 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 
2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[0]: 1 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[1]: 1 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[2]: 1 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[3]: 1 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[4]: 1 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[5]: 1 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_bytes_for_level_multiplier_addtl[6]: 1 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_sequential_skip_in_iterations: 8 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_compaction_bytes: 1677721600 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.ignore_max_compaction_bytes_for_input: true 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.arena_block_size: 1048576 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.soft_pending_compaction_bytes_limit: 68719476736 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.hard_pending_compaction_bytes_limit: 274877906944 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.disable_auto_compactions: 0 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compaction_style: kCompactionStyleLevel 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compaction_pri: kMinOverlappingRatio 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compaction_options_universal.size_ratio: 1 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 
2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compaction_options_universal.min_merge_width: 2 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compaction_options_universal.max_merge_width: 4294967295 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compaction_options_universal.max_size_amplification_percent: 200 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compaction_options_universal.compression_size_percent: -1 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compaction_options_universal.stop_style: kCompactionStopStyleTotalSize 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compaction_options_fifo.max_table_files_size: 1073741824 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.compaction_options_fifo.allow_compaction: 0 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.table_properties_collectors: CompactOnDeletionCollector (Sliding window size = 32768 Deletion trigger = 16384 Deletion ratio = 0); 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.inplace_update_support: 0 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.inplace_update_num_locks: 10000 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.memtable_prefix_bloom_size_ratio: 0.000000 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.memtable_whole_key_filtering: 0 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.memtable_huge_page_size: 0 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.bloom_locality: 0 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.max_successive_merges: 0 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.optimize_filters_for_hits: 0 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.paranoid_file_checks: 0 2026-04-15T13:34:19.892 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.force_consistency_checks: 1 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.report_bg_io_stats: 0 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.ttl: 2592000 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.periodic_compaction_seconds: 0 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.preclude_last_level_data_seconds: 0 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.preserve_internal_time_seconds: 0 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.enable_blob_files: false 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.min_blob_size: 0 2026-04-15T13:34:19.892 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.blob_file_size: 268435456 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.blob_compression_type: NoCompression 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.enable_blob_garbage_collection: false 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.blob_garbage_collection_age_cutoff: 0.250000 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.blob_garbage_collection_force_threshold: 1.000000 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.blob_compaction_readahead_size: 0 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.blob_file_starting_level: 0 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.739+0000 7fa3dfeacd40 4 rocksdb: Options.experimental_mempurge_threshold: 0.000000 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.743+0000 7fa3dfeacd40 4 rocksdb: [db/version_set.cc:5566] Recovered from manifest file:/var/lib/ceph/mon/ceph-vm09/store.db/MANIFEST-000005 succeeded,manifest_file_number is 5, next_file_number is 7, last_sequence is 0, log_number is 0,prev_log_number is 0,max_column_family 
is 0,min_log_number_to_keep is 0 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.743+0000 7fa3dfeacd40 4 rocksdb: [db/version_set.cc:5581] Column family [default] (ID 0), log number is 0 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.743+0000 7fa3dfeacd40 4 rocksdb: [db/db_impl/db_impl_open.cc:539] DB ID: 3aa9188e-215d-4c79-9647-9861765565ff 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.743+0000 7fa3dfeacd40 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1776260059745777, "job": 1, "event": "recovery_started", "wal_files": [4]} 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.743+0000 7fa3dfeacd40 4 rocksdb: [db/db_impl/db_impl_open.cc:1043] Recovering log #4 mode 2 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.747+0000 7fa3dfeacd40 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1776260059749727, "cf_name": "default", "job": 1, "event": "table_file_creation", "file_number": 8, "file_size": 1643, "file_checksum": "", "file_checksum_func_name": "Unknown", "smallest_seqno": 1, "largest_seqno": 5, "table_properties": {"data_size": 523, "index_size": 31, "index_partitions": 0, "top_level_index_size": 0, "index_key_is_user_key": 1, "index_value_is_delta_encoded": 1, "filter_size": 69, "raw_key_size": 115, "raw_average_key_size": 23, "raw_value_size": 401, "raw_average_value_size": 80, "num_data_blocks": 1, "num_entries": 5, "num_filter_entries": 5, "num_deletions": 0, "num_merge_operands": 0, "num_range_deletions": 0, "format_version": 0, "fixed_key_len": 0, "filter_policy": "bloomfilter", "column_family_name": "default", "column_family_id": 0, "comparator": "leveldb.BytewiseComparator", "merge_operator": "", "prefix_extractor_name": "nullptr", "property_collectors": "[CompactOnDeletionCollector]", "compression": "NoCompression", "compression_options": "window_bits=-14; level=32767; strategy=0; max_dict_bytes=0; zstd_max_train_bytes=0; enabled=0; max_dict_buffer_bytes=0; use_zstd_dict_trainer=1; ", "creation_time": 1776260059, "oldest_key_time": 0, "file_creation_time": 0, "slow_compression_estimated_data_size": 0, "fast_compression_estimated_data_size": 0, "db_id": "3aa9188e-215d-4c79-9647-9861765565ff", "db_session_id": "XFIOFXOHLGX4OAEKNF6X", "orig_file_number": 8, "seqno_to_time_mapping": "N/A"}} 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.747+0000 7fa3dfeacd40 4 rocksdb: EVENT_LOG_v1 {"time_micros": 1776260059749796, "job": 1, "event": "recovery_finished"} 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.747+0000 7fa3dfeacd40 4 rocksdb: [db/version_set.cc:5047] Creating manifest 10 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.759+0000 7fa3dfeacd40 4 rocksdb: [file/delete_scheduler.cc:74] Deleted file /var/lib/ceph/mon/ceph-vm09/store.db/000004.log immediately, rate_bytes_per_sec 0, total_trash_size 0 max_trash_db_ratio 0.250000 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.759+0000 7fa3dfeacd40 4 
rocksdb: [db/db_impl/db_impl_open.cc:1987] SstFileManager instance 0x55a7787bee00 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.759+0000 7fa3dfeacd40 4 rocksdb: DB pointer 0x55a7795bc000 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.759+0000 7fa3dfeacd40 0 mon.vm09 does not exist in monmap, will attempt to join an existing cluster 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.759+0000 7fa3dfeacd40 0 using public_addr v2:192.168.123.109:0/0 -> [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.759+0000 7fa3d5c52640 4 rocksdb: [db/db_impl/db_impl.cc:1109] ------- DUMPING STATS ------- 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.759+0000 7fa3d5c52640 4 rocksdb: [db/db_impl/db_impl.cc:1111] 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: ** DB Stats ** 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: Uptime(secs): 0.0 total, 0.0 interval 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: Cumulative writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 GB, 0.00 MB/s 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: Cumulative WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: Cumulative stall: 00:00:0.000 H:M:S, 0.0 percent 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: Interval writes: 0 writes, 0 keys, 0 commit groups, 0.0 writes per commit group, ingest: 0.00 MB, 0.00 MB/s 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: Interval WAL: 0 writes, 0 syncs, 0.00 writes per sync, written: 0.00 GB, 0.00 MB/s 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: Interval stall: 00:00:0.000 H:M:S, 0.0 percent 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: ** Compaction Stats [default] ** 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: Level Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: L0 1/0 1.60 KB 0.2 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.4 0.00 0.00 1 0.004 0 0 0.0 0.0 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: Sum 1/0 1.60 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.4 0.00 0.00 1 0.004 0 0 
0.0 0.0 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: Int 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.4 0.00 0.00 1 0.004 0 0 0.0 0.0 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: ** Compaction Stats [default] ** 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: Priority Files Size Score Read(GB) Rn(GB) Rnp1(GB) Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) Comp(sec) CompMergeCPU(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop Rblob(GB) Wblob(GB) 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: User 0/0 0.00 KB 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.4 0.00 0.00 1 0.004 0 0 0.0 0.0 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: Blob file count: 0, total size: 0.0 GB, garbage size: 0.0 GB, space amp: 0.0 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: Uptime(secs): 0.0 total, 0.0 interval 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: Flush(GB): cumulative 0.000, interval 0.000 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: AddFile(GB): cumulative 0.000, interval 0.000 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: AddFile(Total Files): cumulative 0, interval 0 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: AddFile(L0 Files): cumulative 0, interval 0 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: AddFile(Keys): cumulative 0, interval 0 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: Cumulative compaction: 0.00 GB write, 0.08 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: Interval compaction: 0.00 GB write, 0.08 MB/s write, 0.00 GB read, 0.00 MB/s read, 0.0 seconds 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: Stalls(count): 0 level0_slowdown, 0 level0_slowdown_with_compaction, 0 level0_numfiles, 0 level0_numfiles_with_compaction, 0 stop for pending_compaction_bytes, 0 slowdown for pending_compaction_bytes, 0 memtable_compaction, 0 memtable_slowdown, interval 0 total count 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: Block cache BinnedLRUCache@0x55a7787938d0#7 capacity: 512.00 MB usage: 0.22 KB table_size: 0 occupancy: 18446744073709551615 collections: 1 last_copies: 0 last_secs: 0.00014 secs_since: 0 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: Block cache entry stats(count,size,portion): FilterBlock(1,0.11 KB,2.08616e-05%) IndexBlock(1,0.11 KB,2.08616e-05%) Misc(1,0.00 KB,0%) 2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: ** File Read Latency Histogram By Level 
[default] **
2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.763+0000 7fa3dfeacd40 0 starting mon.vm09 rank -1 at public addrs [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] at bind addrs [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon_data /var/lib/ceph/mon/ceph-vm09 fsid 75e42418-38cf-11f1-9300-4fe77ac4445b
2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.763+0000 7fa3dfeacd40 1 mon.vm09@-1(???) e0 preinit fsid 75e42418-38cf-11f1-9300-4fe77ac4445b
2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cluster 2026-04-15T13:33:57.108730+0000 mgr.vm06.qbbldl (mgr.14172) 35 : cluster [DBG] pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:33:57.727718+0000 mon.vm06 (mon.0) 170 : audit [DBG] from='client.? 192.168.123.109:0/486470959' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:33:59.075596+0000 mon.vm06 (mon.0) 171 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:19.893 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:33:59.078699+0000 mon.vm06 (mon.0) 172 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:33:59.081129+0000 mon.vm06 (mon.0) 173 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:33:59.082324+0000 mon.vm06 (mon.0) 174 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd={"prefix": "mgr module enable", "module": "prometheus"} : dispatch
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cluster 2026-04-15T13:33:59.108925+0000 mgr.vm06.qbbldl (mgr.14172) 36 : cluster [DBG] pgmap v9: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:33:59.400620+0000 mon.vm06 (mon.0) 175 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:33:59.538059+0000 mon.vm06 (mon.0) 176 : audit [DBG] from='client.? 192.168.123.109:0/4145423211' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:00.286368+0000 mon.vm06 (mon.0) 177 : audit [INF] from='mgr.14172 192.168.123.106:0/3410181505' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "mgr module enable", "module": "prometheus"}]': finished
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cluster 2026-04-15T13:34:00.289751+0000 mon.vm06 (mon.0) 178 : cluster [DBG] mgrmap e14: vm06.qbbldl(active, since 31s)
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:01.329704+0000 mon.vm06 (mon.0) 179 : audit [DBG] from='client.? 192.168.123.109:0/3298504752' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:03.147082+0000 mon.vm06 (mon.0) 180 : audit [DBG] from='client.? 192.168.123.109:0/298276015' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:04.896848+0000 mon.vm06 (mon.0) 181 : audit [DBG] from='client.? 192.168.123.109:0/3641088732' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:06.703399+0000 mon.vm06 (mon.0) 182 : audit [DBG] from='client.? 192.168.123.109:0/2786921762' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cluster 2026-04-15T13:34:08.155573+0000 mon.vm06 (mon.0) 183 : cluster [INF] Active manager daemon vm06.qbbldl restarted
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cluster 2026-04-15T13:34:08.156049+0000 mon.vm06 (mon.0) 184 : cluster [INF] Activating manager daemon vm06.qbbldl
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cluster 2026-04-15T13:34:08.162261+0000 mon.vm06 (mon.0) 185 : cluster [DBG] osdmap e5: 0 total, 0 up, 0 in
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cluster 2026-04-15T13:34:08.162450+0000 mon.vm06 (mon.0) 186 : cluster [DBG] mgrmap e15: vm06.qbbldl(active, starting, since 0.00652196s)
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:08.164271+0000 mon.vm06 (mon.0) 187 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm06"} : dispatch
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:08.164660+0000 mon.vm06 (mon.0) 188 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mgr metadata", "who": "vm06.qbbldl", "id": "vm06.qbbldl"} : dispatch
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:08.165285+0000 mon.vm06 (mon.0) 189 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mds metadata"} : dispatch
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:08.165748+0000 mon.vm06 (mon.0) 190 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata"} : dispatch
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:08.166193+0000 mon.vm06 (mon.0) 191 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata"} : dispatch
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cluster 2026-04-15T13:34:08.171293+0000 mon.vm06 (mon.0) 192 : cluster [INF] Manager daemon vm06.qbbldl is now available
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:08.437164+0000 mon.vm06 (mon.0) 193 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:08.446688+0000 mon.vm06 (mon.0) 194 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:08.467395+0000 mon.vm06 (mon.0) 195 : audit [DBG] from='client.? 192.168.123.109:0/1038886158' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:08.479965+0000 mon.vm06 (mon.0) 196 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:08.480485+0000 mon.vm06 (mon.0) 197 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch
2026-04-15T13:34:19.894 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:08.481139+0000 mon.vm06 (mon.0) 198 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.qbbldl/mirror_snapshot_schedule"} : dispatch
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:08.517168+0000 mon.vm06 (mon.0) 199 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix":"config rm","who":"mgr","name":"mgr/rbd_support/vm06.qbbldl/trash_purge_schedule"} : dispatch
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:09.018011+0000 mon.vm06 (mon.0) 200 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cluster 2026-04-15T13:34:09.165654+0000 mon.vm06 (mon.0) 201 : cluster [DBG] mgrmap e16: vm06.qbbldl(active, since 1.00973s)
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:08.933218+0000 mgr.vm06.qbbldl (mgr.14229) 1 : cephadm [INF] [15/Apr/2026:13:34:08] ENGINE Bus STARTING
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:09.036619+0000 mgr.vm06.qbbldl (mgr.14229) 2 : cephadm [INF] [15/Apr/2026:13:34:09] ENGINE Serving on http://192.168.123.106:8765
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:09.146285+0000 mgr.vm06.qbbldl (mgr.14229) 3 : cephadm [INF] [15/Apr/2026:13:34:09] ENGINE Serving on https://192.168.123.106:7150
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:09.146434+0000 mgr.vm06.qbbldl (mgr.14229) 4 : cephadm [INF] [15/Apr/2026:13:34:09] ENGINE Bus STARTED
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:09.146656+0000 mgr.vm06.qbbldl (mgr.14229) 5 : cephadm [INF] [15/Apr/2026:13:34:09] ENGINE Client ('192.168.123.106', 38848) lost — peer dropped the TLS connection suddenly, during handshake: (6, 'TLS/SSL connection has been closed (EOF) (_ssl.c:1147)')
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:09.166432+0000 mgr.vm06.qbbldl (mgr.14229) 6 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:09.305554+0000 mon.vm06 (mon.0) 202 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:09.930995+0000 mon.vm06 (mon.0) 203 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:10.744045+0000 mon.vm06 (mon.0) 204 : audit [DBG] from='client.? 192.168.123.109:0/326797789' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cluster 2026-04-15T13:34:10.934395+0000 mon.vm06 (mon.0) 205 : cluster [DBG] mgrmap e17: vm06.qbbldl(active, since 2s)
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:11.739976+0000 mon.vm06 (mon.0) 206 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:11.742549+0000 mon.vm06 (mon.0) 207 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:11.745614+0000 mon.vm06 (mon.0) 208 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:11.748135+0000 mon.vm06 (mon.0) 209 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:11.748873+0000 mon.vm06 (mon.0) 210 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"} : dispatch
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:12.525173+0000 mon.vm06 (mon.0) 211 : audit [DBG] from='client.? 192.168.123.109:0/2958605695' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:13.707249+0000 mon.vm06 (mon.0) 212 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:13.710910+0000 mon.vm06 (mon.0) 213 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:14.346943+0000 mon.vm06 (mon.0) 214 : audit [DBG] from='client.? 192.168.123.109:0/1627993379' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:14.372967+0000 mon.vm06 (mon.0) 215 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:14.376414+0000 mon.vm06 (mon.0) 216 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:14.377395+0000 mon.vm06 (mon.0) 217 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"} : dispatch
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:14.378157+0000 mon.vm06 (mon.0) 218 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:14.378617+0000 mon.vm06 (mon.0) 219 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:34:19.895 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:14.553198+0000 mon.vm06 (mon.0) 220 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09
bash[34466]: audit 2026-04-15T13:34:14.555693+0000 mon.vm06 (mon.0) 221 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:14.555693+0000 mon.vm06 (mon.0) 221 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:14.561546+0000 mon.vm06 (mon.0) 222 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:14.561546+0000 mon.vm06 (mon.0) 222 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:14.564437+0000 mon.vm06 (mon.0) 223 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:14.564437+0000 mon.vm06 (mon.0) 223 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:14.567107+0000 mon.vm06 (mon.0) 224 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:14.567107+0000 mon.vm06 (mon.0) 224 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:14.568139+0000 mon.vm06 (mon.0) 225 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:14.568139+0000 mon.vm06 (mon.0) 225 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:14.569138+0000 mon.vm06 (mon.0) 226 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]}]': finished 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:14.569138+0000 mon.vm06 (mon.0) 226 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", 
"allow r"]}]': finished 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:14.570134+0000 mon.vm06 (mon.0) 227 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:14.570134+0000 mon.vm06 (mon.0) 227 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:14.379336+0000 mgr.vm06.qbbldl (mgr.14229) 7 : cephadm [INF] Updating vm06:/etc/ceph/ceph.conf 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:14.379336+0000 mgr.vm06.qbbldl (mgr.14229) 7 : cephadm [INF] Updating vm06:/etc/ceph/ceph.conf 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:14.379440+0000 mgr.vm06.qbbldl (mgr.14229) 8 : cephadm [INF] Updating vm09:/etc/ceph/ceph.conf 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:14.379440+0000 mgr.vm06.qbbldl (mgr.14229) 8 : cephadm [INF] Updating vm09:/etc/ceph/ceph.conf 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:14.424032+0000 mgr.vm06.qbbldl (mgr.14229) 9 : cephadm [INF] Updating vm06:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.conf 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:14.424032+0000 mgr.vm06.qbbldl (mgr.14229) 9 : cephadm [INF] Updating vm06:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.conf 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:14.426972+0000 mgr.vm06.qbbldl (mgr.14229) 10 : cephadm [INF] Updating vm09:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.conf 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:14.426972+0000 mgr.vm06.qbbldl (mgr.14229) 10 : cephadm [INF] Updating vm09:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.conf 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:14.468621+0000 mgr.vm06.qbbldl (mgr.14229) 11 : cephadm [INF] Updating vm09:/etc/ceph/ceph.client.admin.keyring 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:14.468621+0000 mgr.vm06.qbbldl (mgr.14229) 11 : cephadm [INF] Updating vm09:/etc/ceph/ceph.client.admin.keyring 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:14.471902+0000 mgr.vm06.qbbldl (mgr.14229) 12 : cephadm [INF] Updating vm06:/etc/ceph/ceph.client.admin.keyring 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:14.471902+0000 mgr.vm06.qbbldl (mgr.14229) 12 : cephadm [INF] Updating vm06:/etc/ceph/ceph.client.admin.keyring 2026-04-15T13:34:19.896 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:14.509540+0000 mgr.vm06.qbbldl (mgr.14229) 13 : cephadm [INF] Updating vm09:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.client.admin.keyring 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:14.509540+0000 mgr.vm06.qbbldl (mgr.14229) 13 : cephadm [INF] Updating vm09:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.client.admin.keyring 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:14.516516+0000 mgr.vm06.qbbldl (mgr.14229) 14 : cephadm [INF] Updating vm06:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.client.admin.keyring 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:14.516516+0000 mgr.vm06.qbbldl (mgr.14229) 14 : cephadm [INF] Updating vm06:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.client.admin.keyring 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:14.570712+0000 mgr.vm06.qbbldl (mgr.14229) 15 : cephadm [INF] Deploying daemon ceph-exporter.vm09 on vm09 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:14.570712+0000 mgr.vm06.qbbldl (mgr.14229) 15 : cephadm [INF] Deploying daemon ceph-exporter.vm09 on vm09 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.035745+0000 mon.vm06 (mon.0) 228 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.035745+0000 mon.vm06 (mon.0) 228 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.038393+0000 mon.vm06 (mon.0) 229 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.038393+0000 mon.vm06 (mon.0) 229 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.040822+0000 mon.vm06 (mon.0) 230 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.040822+0000 mon.vm06 (mon.0) 230 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.043079+0000 mon.vm06 (mon.0) 231 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.043079+0000 mon.vm06 (mon.0) 231 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.896 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.044151+0000 mon.vm06 (mon.0) 232 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.044151+0000 mon.vm06 (mon.0) 232 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.045295+0000 mon.vm06 (mon.0) 233 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.045295+0000 mon.vm06 (mon.0) 233 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]}]': finished 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.046723+0000 mon.vm06 (mon.0) 234 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:19.896 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.046723+0000 mon.vm06 (mon.0) 234 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:16.047283+0000 mgr.vm06.qbbldl (mgr.14229) 16 : cephadm [INF] Deploying daemon crash.vm09 on vm09 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:16.047283+0000 mgr.vm06.qbbldl (mgr.14229) 16 : cephadm [INF] Deploying daemon crash.vm09 on vm09 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.370955+0000 mon.vm06 (mon.0) 235 : audit [DBG] from='client.? 192.168.123.109:0/3202495841' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.370955+0000 mon.vm06 (mon.0) 235 : audit [DBG] from='client.? 
192.168.123.109:0/3202495841' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.914255+0000 mon.vm06 (mon.0) 236 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.914255+0000 mon.vm06 (mon.0) 236 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.917007+0000 mon.vm06 (mon.0) 237 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.917007+0000 mon.vm06 (mon.0) 237 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.919289+0000 mon.vm06 (mon.0) 238 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.919289+0000 mon.vm06 (mon.0) 238 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.921317+0000 mon.vm06 (mon.0) 239 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:16.921317+0000 mon.vm06 (mon.0) 239 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:16.922243+0000 mgr.vm06.qbbldl (mgr.14229) 17 : cephadm [INF] Deploying daemon node-exporter.vm09 on vm09 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:16.922243+0000 mgr.vm06.qbbldl (mgr.14229) 17 : cephadm [INF] Deploying daemon node-exporter.vm09 on vm09 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:17.636783+0000 mon.vm06 (mon.0) 240 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:17.636783+0000 mon.vm06 (mon.0) 240 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:17.639831+0000 mon.vm06 (mon.0) 241 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:17.639831+0000 mon.vm06 (mon.0) 241 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:17.642517+0000 mon.vm06 (mon.0) 242 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:17.642517+0000 mon.vm06 (mon.0) 242 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:17.644566+0000 mon.vm06 (mon.0) 243 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:17.644566+0000 mon.vm06 (mon.0) 243 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:17.645932+0000 mon.vm06 (mon.0) 244 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm09.kpawde", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:17.645932+0000 mon.vm06 (mon.0) 244 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm09.kpawde", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:17.647170+0000 mon.vm06 (mon.0) 245 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm09.kpawde", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:17.647170+0000 mon.vm06 (mon.0) 245 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "mgr.vm09.kpawde", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]}]': finished 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:17.648296+0000 mon.vm06 (mon.0) 246 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mgr services"} : dispatch 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:17.648296+0000 mon.vm06 (mon.0) 246 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mgr services"} : dispatch 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:17.648846+0000 mon.vm06 (mon.0) 247 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:17.648846+0000 mon.vm06 (mon.0) 247 : audit 
[DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:17.649480+0000 mgr.vm06.qbbldl (mgr.14229) 18 : cephadm [INF] Deploying daemon mgr.vm09.kpawde on vm09 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: cephadm 2026-04-15T13:34:17.649480+0000 mgr.vm06.qbbldl (mgr.14229) 18 : cephadm [INF] Deploying daemon mgr.vm09.kpawde on vm09 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:18.187869+0000 mon.vm06 (mon.0) 248 : audit [DBG] from='client.? 192.168.123.109:0/1309726075' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:18.187869+0000 mon.vm06 (mon.0) 248 : audit [DBG] from='client.? 192.168.123.109:0/1309726075' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:18.451836+0000 mon.vm06 (mon.0) 249 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:18.451836+0000 mon.vm06 (mon.0) 249 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:18.480964+0000 mon.vm06 (mon.0) 250 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:18.480964+0000 mon.vm06 (mon.0) 250 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:18.483721+0000 mon.vm06 (mon.0) 251 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:18.483721+0000 mon.vm06 (mon.0) 251 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:18.486486+0000 mon.vm06 (mon.0) 252 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:18.486486+0000 mon.vm06 (mon.0) 252 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:18.488705+0000 mon.vm06 (mon.0) 253 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:18.488705+0000 mon.vm06 (mon.0) 253 : audit [INF] 
from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:18.489578+0000 mon.vm06 (mon.0) 254 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "mon."} : dispatch 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:18.489578+0000 mon.vm06 (mon.0) 254 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "mon."} : dispatch 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:18.490068+0000 mon.vm06 (mon.0) 255 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: audit 2026-04-15T13:34:18.490068+0000 mon.vm06 (mon.0) 255 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.827+0000 7fa3d8c58640 0 mon.vm09@-1(synchronizing).mds e1 new map 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.827+0000 7fa3d8c58640 0 mon.vm09@-1(synchronizing).mds e1 print_map 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: e1 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: btime 2026-04-15T13:32:40:722083+0000 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: enable_multiple, ever_enabled_multiple: 1,1 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: default compat: compat={},rocompat={},incompat={1=base v0.20,2=client writeable ranges,3=default file layouts on dirs,4=dir inode in separate object,5=mds uses versioned encoding,6=dirfrag is stored in omap,8=no anchor table,9=file layout v2,10=snaprealm v2,11=minor log segments,12=quiesce subvolumes} 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: legacy client fscid: -1 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: No filesystems configured 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.827+0000 7fa3d8c58640 1 mon.vm09@-1(synchronizing).osd e0 _set_cache_ratios kv ratio 0.25 inc ratio 0.375 full ratio 0.375 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.827+0000 7fa3d8c58640 1 mon.vm09@-1(synchronizing).osd e0 register_cache_with_pcm pcm target: 2147483648 pcm max: 1020054732 pcm min: 134217728 inc_osd_cache size: 1 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.827+0000 7fa3d8c58640 1 mon.vm09@-1(synchronizing).osd e1 e1: 0 total, 0 up, 0 in 
2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.827+0000 7fa3d8c58640 1 mon.vm09@-1(synchronizing).osd e2 e2: 0 total, 0 up, 0 in 2026-04-15T13:34:19.897 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.827+0000 7fa3d8c58640 1 mon.vm09@-1(synchronizing).osd e3 e3: 0 total, 0 up, 0 in 2026-04-15T13:34:19.898 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.827+0000 7fa3d8c58640 1 mon.vm09@-1(synchronizing).osd e4 e4: 0 total, 0 up, 0 in 2026-04-15T13:34:19.898 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.827+0000 7fa3d8c58640 1 mon.vm09@-1(synchronizing).osd e5 e5: 0 total, 0 up, 0 in 2026-04-15T13:34:19.898 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.827+0000 7fa3d8c58640 0 mon.vm09@-1(synchronizing).osd e5 crush map has features 3314932999778484224, adjusting msgr requires 2026-04-15T13:34:19.898 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.827+0000 7fa3d8c58640 0 mon.vm09@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires 2026-04-15T13:34:19.898 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.827+0000 7fa3d8c58640 0 mon.vm09@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires 2026-04-15T13:34:19.898 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.827+0000 7fa3d8c58640 0 mon.vm09@-1(synchronizing).osd e5 crush map has features 288514050185494528, adjusting msgr requires 2026-04-15T13:34:19.898 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:19 vm09 bash[34466]: debug 2026-04-15T13:34:19.839+0000 7fa3d8c58640 1 mon.vm09@-1(synchronizing).paxosservice(auth 1..8) refresh upgraded, format 0 -> 3 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:19.853402+0000 mon.vm06 (mon.0) 262 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:19.853402+0000 mon.vm06 (mon.0) 262 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:19.853639+0000 mon.vm06 (mon.0) 263 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm06"} : dispatch 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:19.853639+0000 mon.vm06 (mon.0) 263 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm06"} : dispatch 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:19.853821+0000 mon.vm06 (mon.0) 264 : cluster [INF] mon.vm06 calling monitor election 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 
13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:19.853821+0000 mon.vm06 (mon.0) 264 : cluster [INF] mon.vm06 calling monitor election 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:20.850218+0000 mon.vm06 (mon.0) 265 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:20.850218+0000 mon.vm06 (mon.0) 265 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:21.848775+0000 mon.vm09 (mon.1) 1 : cluster [INF] mon.vm09 calling monitor election 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:21.848775+0000 mon.vm09 (mon.1) 1 : cluster [INF] mon.vm09 calling monitor election 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:21.850327+0000 mon.vm06 (mon.0) 266 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:21.850327+0000 mon.vm06 (mon.0) 266 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:22.849995+0000 mon.vm06 (mon.0) 267 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:22.849995+0000 mon.vm06 (mon.0) 267 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:23.480055+0000 mon.vm06 (mon.0) 268 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:23.480055+0000 mon.vm06 (mon.0) 268 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:23.850081+0000 mon.vm06 (mon.0) 269 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:23.850081+0000 mon.vm06 (mon.0) 269 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": 
"vm09"} : dispatch 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:24.850476+0000 mon.vm06 (mon.0) 270 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:24.850476+0000 mon.vm06 (mon.0) 270 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.859116+0000 mon.vm06 (mon.0) 271 : cluster [INF] mon.vm06 is new leader, mons vm06,vm09 in quorum (ranks 0,1) 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.859116+0000 mon.vm06 (mon.0) 271 : cluster [INF] mon.vm06 is new leader, mons vm06,vm09 in quorum (ranks 0,1) 2026-04-15T13:34:25.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.863521+0000 mon.vm06 (mon.0) 272 : cluster [DBG] monmap epoch 2 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.863521+0000 mon.vm06 (mon.0) 272 : cluster [DBG] monmap epoch 2 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.863537+0000 mon.vm06 (mon.0) 273 : cluster [DBG] fsid 75e42418-38cf-11f1-9300-4fe77ac4445b 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.863537+0000 mon.vm06 (mon.0) 273 : cluster [DBG] fsid 75e42418-38cf-11f1-9300-4fe77ac4445b 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.863553+0000 mon.vm06 (mon.0) 274 : cluster [DBG] last_changed 2026-04-15T13:34:19.850266+0000 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.863553+0000 mon.vm06 (mon.0) 274 : cluster [DBG] last_changed 2026-04-15T13:34:19.850266+0000 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.863568+0000 mon.vm06 (mon.0) 275 : cluster [DBG] created 2026-04-15T13:32:39.407888+0000 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.863568+0000 mon.vm06 (mon.0) 275 : cluster [DBG] created 2026-04-15T13:32:39.407888+0000 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.863586+0000 mon.vm06 (mon.0) 276 : cluster [DBG] min_mon_release 20 (tentacle) 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.863586+0000 mon.vm06 (mon.0) 276 : cluster [DBG] min_mon_release 20 (tentacle) 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.863602+0000 mon.vm06 (mon.0) 277 : cluster [DBG] election_strategy: 1 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.863602+0000 
mon.vm06 (mon.0) 277 : cluster [DBG] election_strategy: 1 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.863617+0000 mon.vm06 (mon.0) 278 : cluster [DBG] 0: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.vm06 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.863617+0000 mon.vm06 (mon.0) 278 : cluster [DBG] 0: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.vm06 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.863626+0000 mon.vm06 (mon.0) 279 : cluster [DBG] 1: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.vm09 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.863626+0000 mon.vm06 (mon.0) 279 : cluster [DBG] 1: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.vm09 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.864215+0000 mon.vm06 (mon.0) 280 : cluster [DBG] fsmap 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.864215+0000 mon.vm06 (mon.0) 280 : cluster [DBG] fsmap 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.864250+0000 mon.vm06 (mon.0) 281 : cluster [DBG] osdmap e5: 0 total, 0 up, 0 in 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.864250+0000 mon.vm06 (mon.0) 281 : cluster [DBG] osdmap e5: 0 total, 0 up, 0 in 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.864417+0000 mon.vm06 (mon.0) 282 : cluster [DBG] mgrmap e17: vm06.qbbldl(active, since 16s) 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.864417+0000 mon.vm06 (mon.0) 282 : cluster [DBG] mgrmap e17: vm06.qbbldl(active, since 16s) 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.864568+0000 mon.vm06 (mon.0) 283 : cluster [INF] overall HEALTH_OK 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: cluster 2026-04-15T13:34:24.864568+0000 mon.vm06 (mon.0) 283 : cluster [INF] overall HEALTH_OK 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:24.868561+0000 mon.vm06 (mon.0) 284 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:24.868561+0000 mon.vm06 (mon.0) 284 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:24.872084+0000 mon.vm06 (mon.0) 285 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:24.872084+0000 mon.vm06 (mon.0) 285 : audit [INF] 
from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:24.875956+0000 mon.vm06 (mon.0) 286 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:24.875956+0000 mon.vm06 (mon.0) 286 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:24.876588+0000 mon.vm06 (mon.0) 287 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:24.876588+0000 mon.vm06 (mon.0) 287 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:24.877085+0000 mon.vm06 (mon.0) 288 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:34:25.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:24 vm06 bash[28114]: audit 2026-04-15T13:34:24.877085+0000 mon.vm06 (mon.0) 288 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:34:25.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: audit 2026-04-15T13:34:19.853402+0000 mon.vm06 (mon.0) 262 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch 2026-04-15T13:34:25.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: audit 2026-04-15T13:34:19.853402+0000 mon.vm06 (mon.0) 262 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch 2026-04-15T13:34:25.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: audit 2026-04-15T13:34:19.853639+0000 mon.vm06 (mon.0) 263 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm06"} : dispatch 2026-04-15T13:34:25.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: audit 2026-04-15T13:34:19.853639+0000 mon.vm06 (mon.0) 263 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm06"} : dispatch 2026-04-15T13:34:25.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: cluster 2026-04-15T13:34:19.853821+0000 mon.vm06 (mon.0) 264 : cluster [INF] mon.vm06 calling monitor election 2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: cluster 2026-04-15T13:34:19.853821+0000 mon.vm06 (mon.0) 264 : cluster [INF] mon.vm06 calling monitor election 2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: audit 2026-04-15T13:34:20.850218+0000 mon.vm06 
(mon.0) 265 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch
2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: cluster 2026-04-15T13:34:21.848775+0000 mon.vm09 (mon.1) 1 : cluster [INF] mon.vm09 calling monitor election
2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: audit 2026-04-15T13:34:21.850327+0000 mon.vm06 (mon.0) 266 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch
2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: audit 2026-04-15T13:34:22.849995+0000 mon.vm06 (mon.0) 267 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch
2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: audit 2026-04-15T13:34:23.480055+0000 mon.vm06 (mon.0) 268 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: audit 2026-04-15T13:34:23.850081+0000 mon.vm06 (mon.0) 269 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch
2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: audit 2026-04-15T13:34:24.850476+0000 mon.vm06 (mon.0) 270 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch
2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: cluster 2026-04-15T13:34:24.859116+0000 mon.vm06 (mon.0) 271 : cluster [INF] mon.vm06 is new leader, mons vm06,vm09 in quorum (ranks 0,1)
2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: cluster 2026-04-15T13:34:24.863521+0000 mon.vm06 (mon.0) 272 : cluster [DBG] monmap epoch 2
2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: cluster 2026-04-15T13:34:24.863537+0000 mon.vm06 (mon.0) 273 : cluster [DBG] fsid 75e42418-38cf-11f1-9300-4fe77ac4445b
2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: cluster 2026-04-15T13:34:24.863553+0000 mon.vm06 (mon.0) 274 : cluster [DBG] last_changed 2026-04-15T13:34:19.850266+0000
2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: cluster 2026-04-15T13:34:24.863568+0000 mon.vm06 (mon.0) 275 : cluster [DBG] created 2026-04-15T13:32:39.407888+0000
2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: cluster 2026-04-15T13:34:24.863586+0000 mon.vm06 (mon.0) 276 : cluster [DBG] min_mon_release 20 (tentacle)
2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: cluster 2026-04-15T13:34:24.863602+0000 mon.vm06 (mon.0) 277 : cluster [DBG] election_strategy: 1
2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: cluster 2026-04-15T13:34:24.863617+0000 mon.vm06 (mon.0) 278 : cluster [DBG] 0: [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] mon.vm06
2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: cluster 2026-04-15T13:34:24.863626+0000 mon.vm06 (mon.0) 279 : cluster [DBG] 1: [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0] mon.vm09
2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: cluster 2026-04-15T13:34:24.864215+0000 mon.vm06 (mon.0) 280 : cluster [DBG] fsmap
2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: cluster 2026-04-15T13:34:24.864250+0000 mon.vm06 (mon.0) 281 : cluster [DBG] osdmap e5: 0 total, 0 up, 0 in
2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: cluster 2026-04-15T13:34:24.864417+0000 mon.vm06 (mon.0) 282 : cluster [DBG] mgrmap e17: vm06.qbbldl(active, since 16s)
2026-04-15T13:34:25.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: cluster 2026-04-15T13:34:24.864568+0000 mon.vm06 (mon.0) 283 : cluster [INF] overall HEALTH_OK
2026-04-15T13:34:25.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: audit 2026-04-15T13:34:24.868561+0000 mon.vm06 (mon.0) 284 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:25.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: audit 2026-04-15T13:34:24.872084+0000 mon.vm06 (mon.0) 285 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:25.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: audit 2026-04-15T13:34:24.875956+0000 mon.vm06 (mon.0) 286 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:25.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: audit 2026-04-15T13:34:24.876588+0000 mon.vm06 (mon.0) 287 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:25.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:24 vm09 bash[34466]: audit 2026-04-15T13:34:24.877085+0000 mon.vm06 (mon.0) 288 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:34:26.219 INFO:teuthology.orchestra.run.vm09.stdout:
2026-04-15T13:34:26.219 INFO:teuthology.orchestra.run.vm09.stdout:{"epoch":2,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","modified":"2026-04-15T13:34:19.850266Z","created":"2026-04-15T13:32:39.407888Z","min_mon_release":20,"min_mon_release_name":"tentacle","election_strategy":1,"disallowed_leaders":"","stretch_mode":false,"tiebreaker_mon":"","removed_ranks":"","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus","pacific","elector-pinging","quincy","reef","squid","tentacle"],"optional":[]},"mons":[{"rank":0,"name":"vm06","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:3300","nonce":0},{"type":"v1","addr":"192.168.123.106:6789","nonce":0}]},"addr":"192.168.123.106:6789/0","public_addr":"192.168.123.106:6789/0","priority":0,"weight":0,"crush_location":"{}"},{"rank":1,"name":"vm09","public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:3300","nonce":0},{"type":"v1","addr":"192.168.123.109:6789","nonce":0}]},"addr":"192.168.123.109:6789/0","public_addr":"192.168.123.109:6789/0","priority":0,"weight":0,"crush_location":"{}"}],"quorum":[0,1]}
2026-04-15T13:34:26.219 INFO:teuthology.orchestra.run.vm09.stderr:dumped monmap epoch 2
2026-04-15T13:34:26.232 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:25 vm09 bash[34466]: cephadm 2026-04-15T13:34:24.877706+0000 mgr.vm06.qbbldl (mgr.14229) 20 : cephadm [INF] Updating vm06:/etc/ceph/ceph.conf
2026-04-15T13:34:26.232 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:25 vm09 bash[34466]: cephadm 2026-04-15T13:34:24.878133+0000 mgr.vm06.qbbldl (mgr.14229) 21 : cephadm [INF] Updating vm09:/etc/ceph/ceph.conf
2026-04-15T13:34:26.232 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:25 vm09 bash[34466]: cephadm 2026-04-15T13:34:24.919870+0000 mgr.vm06.qbbldl (mgr.14229) 22 : cephadm [INF] Updating vm06:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.conf
2026-04-15T13:34:26.232 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:25 vm09 bash[34466]: cephadm 2026-04-15T13:34:24.924377+0000 mgr.vm06.qbbldl (mgr.14229) 23 : cephadm [INF] Updating vm09:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.conf
2026-04-15T13:34:26.232 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:25 vm09 bash[34466]: audit 2026-04-15T13:34:24.969906+0000 mon.vm06 (mon.0) 289 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:26.232 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:25 vm09 bash[34466]: audit 2026-04-15T13:34:24.973839+0000 mon.vm06 (mon.0) 290 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:26.232 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:25 vm09 bash[34466]: audit 2026-04-15T13:34:24.977265+0000 mon.vm06 (mon.0) 291 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:26.232 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:25 vm09 bash[34466]: audit 2026-04-15T13:34:24.980738+0000 mon.vm06 (mon.0) 292 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:26.232 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:25 vm09 bash[34466]: audit 2026-04-15T13:34:24.983675+0000 mon.vm06 (mon.0) 293 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:26.232 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:25 vm09 bash[34466]: cephadm 2026-04-15T13:34:24.994531+0000 mgr.vm06.qbbldl (mgr.14229) 24 : cephadm [INF] Reconfiguring prometheus.vm06 deps ['8765', '9283', 'alertmanager', 'ceph-exporter.vm06', 'mgr.vm06.qbbldl', 'node-exporter', 'secure_monitoring_stack:False'] -> ['8765', '9283', 'alertmanager', 'ceph-exporter.vm06', 'ceph-exporter.vm09', 'mgr.vm06.qbbldl', 'node-exporter', 'secure_monitoring_stack:False'] (diff {'ceph-exporter.vm09'})
2026-04-15T13:34:26.232 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:25 vm09 bash[34466]: cephadm 2026-04-15T13:34:25.153612+0000 mgr.vm06.qbbldl (mgr.14229) 25 : cephadm [INF] Reconfiguring daemon prometheus.vm06 on vm06
2026-04-15T13:34:26.232 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:25 vm09 bash[34466]: audit 2026-04-15T13:34:25.816179+0000 mon.vm06 (mon.0) 294 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:26.232 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:25 vm09 bash[34466]: audit 2026-04-15T13:34:25.820035+0000 mon.vm06 (mon.0) 295 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:26.232 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:25 vm09 bash[34466]: audit 2026-04-15T13:34:25.820680+0000 mon.vm06 (mon.0) 296 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm06.qbbldl", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch
2026-04-15T13:34:26.232 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:25 vm09 bash[34466]: audit 2026-04-15T13:34:25.821189+0000 mon.vm06 (mon.0) 297 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mgr services"} : dispatch
2026-04-15T13:34:26.232 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:25 vm09 bash[34466]: audit 2026-04-15T13:34:25.821572+0000 mon.vm06 (mon.0) 298 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:26.232 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:25 vm09 bash[34466]: audit 2026-04-15T13:34:25.850395+0000 mon.vm06 (mon.0) 299 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch
2026-04-15T13:34:26.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:25 vm06 bash[28114]: cephadm 2026-04-15T13:34:24.877706+0000 mgr.vm06.qbbldl (mgr.14229) 20 : cephadm [INF] Updating vm06:/etc/ceph/ceph.conf
2026-04-15T13:34:26.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:25 vm06 bash[28114]: cephadm 2026-04-15T13:34:24.878133+0000 mgr.vm06.qbbldl (mgr.14229) 21 : cephadm [INF] Updating vm09:/etc/ceph/ceph.conf
2026-04-15T13:34:26.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:25 vm06 bash[28114]: cephadm 2026-04-15T13:34:24.919870+0000 mgr.vm06.qbbldl (mgr.14229) 22 : cephadm [INF] Updating vm06:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.conf
2026-04-15T13:34:26.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:25 vm06 bash[28114]: cephadm 2026-04-15T13:34:24.924377+0000 mgr.vm06.qbbldl (mgr.14229) 23 : cephadm [INF] Updating vm09:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/config/ceph.conf
2026-04-15T13:34:26.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:25 vm06 bash[28114]: audit 2026-04-15T13:34:24.969906+0000 mon.vm06 (mon.0) 289 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:26.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:25 vm06 bash[28114]: audit 2026-04-15T13:34:24.973839+0000 mon.vm06 (mon.0) 290 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:26.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:25 vm06 bash[28114]: audit 2026-04-15T13:34:24.977265+0000 mon.vm06 (mon.0) 291 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:26.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:25 vm06 bash[28114]: audit 2026-04-15T13:34:24.980738+0000 mon.vm06 (mon.0) 292 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:26.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:25 vm06 bash[28114]: audit 2026-04-15T13:34:24.983675+0000 mon.vm06 (mon.0) 293 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:26.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:25 vm06 bash[28114]: cephadm 2026-04-15T13:34:24.994531+0000 mgr.vm06.qbbldl (mgr.14229) 24 : cephadm [INF] Reconfiguring prometheus.vm06 deps ['8765', '9283', 'alertmanager', 'ceph-exporter.vm06', 'mgr.vm06.qbbldl', 'node-exporter', 'secure_monitoring_stack:False'] -> ['8765', '9283', 'alertmanager', 'ceph-exporter.vm06', 'ceph-exporter.vm09', 'mgr.vm06.qbbldl', 'node-exporter', 'secure_monitoring_stack:False'] (diff {'ceph-exporter.vm09'})
2026-04-15T13:34:26.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:25 vm06 bash[28114]: cephadm 2026-04-15T13:34:25.153612+0000 mgr.vm06.qbbldl (mgr.14229) 25 : cephadm [INF] Reconfiguring daemon prometheus.vm06 on vm06
2026-04-15T13:34:26.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:25 vm06 bash[28114]: audit 2026-04-15T13:34:25.816179+0000 mon.vm06 (mon.0) 294 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:26.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:25 vm06 bash[28114]: audit 2026-04-15T13:34:25.820035+0000 mon.vm06 (mon.0) 295 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:26.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:25 vm06 bash[28114]: audit 2026-04-15T13:34:25.820680+0000 mon.vm06 (mon.0) 296 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm06.qbbldl", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch
2026-04-15T13:34:26.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:25 vm06 bash[28114]: audit 2026-04-15T13:34:25.821189+0000 mon.vm06 (mon.0) 297 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mgr services"} : dispatch
2026-04-15T13:34:26.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:25 vm06 bash[28114]: audit 2026-04-15T13:34:25.821572+0000 mon.vm06 (mon.0) 298 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:26.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:25 vm06 bash[28114]: audit 2026-04-15T13:34:25.850395+0000 mon.vm06 (mon.0) 299 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch
2026-04-15T13:34:26.283 INFO:tasks.cephadm:Generating final ceph.conf file...
2026-04-15T13:34:26.283 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph config generate-minimal-conf
2026-04-15T13:34:26.552 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:34:26.941 INFO:teuthology.orchestra.run.vm06.stdout:# minimal ceph.conf for 75e42418-38cf-11f1-9300-4fe77ac4445b
2026-04-15T13:34:26.941 INFO:teuthology.orchestra.run.vm06.stdout:[global]
2026-04-15T13:34:26.941 INFO:teuthology.orchestra.run.vm06.stdout: fsid = 75e42418-38cf-11f1-9300-4fe77ac4445b
2026-04-15T13:34:26.941 INFO:teuthology.orchestra.run.vm06.stdout: mon_host = [v2:192.168.123.106:3300/0,v1:192.168.123.106:6789/0] [v2:192.168.123.109:3300/0,v1:192.168.123.109:6789/0]
2026-04-15T13:34:27.038 INFO:tasks.cephadm:Distributing (final) config and client.admin keyring...
2026-04-15T13:34:27.038 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-04-15T13:34:27.038 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/ceph/ceph.conf
2026-04-15T13:34:27.045 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-04-15T13:34:27.045 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/ceph/ceph.client.admin.keyring
2026-04-15T13:34:27.098 DEBUG:teuthology.orchestra.run.vm09:> set -ex
2026-04-15T13:34:27.098 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/ceph/ceph.conf
2026-04-15T13:34:27.106 DEBUG:teuthology.orchestra.run.vm09:> set -ex
2026-04-15T13:34:27.106 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/ceph/ceph.client.admin.keyring
2026-04-15T13:34:27.157 DEBUG:tasks.cephadm:set 0 configs
2026-04-15T13:34:27.157 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph config dump
2026-04-15T13:34:27.162 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:26 vm06 bash[28114]: cephadm 2026-04-15T13:34:25.820493+0000 mgr.vm06.qbbldl (mgr.14229) 26 : cephadm [INF] Reconfiguring mgr.vm06.qbbldl (unknown last config time)...
2026-04-15T13:34:27.162 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:26 vm06 bash[28114]: cephadm 2026-04-15T13:34:25.822003+0000 mgr.vm06.qbbldl (mgr.14229) 27 : cephadm [INF] Reconfiguring daemon mgr.vm06.qbbldl on vm06
2026-04-15T13:34:27.162 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:26 vm06 bash[28114]: audit 2026-04-15T13:34:26.219729+0000 mon.vm06 (mon.0) 300 : audit [DBG] from='client.? 192.168.123.109:0/2729844555' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T13:34:27.162 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:26 vm06 bash[28114]: audit 2026-04-15T13:34:26.222864+0000 mon.vm06 (mon.0) 301 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:27.162 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:26 vm06 bash[28114]: audit 2026-04-15T13:34:26.227058+0000 mon.vm06 (mon.0) 302 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:27.162 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:26 vm06 bash[28114]: cephadm 2026-04-15T13:34:26.227768+0000 mgr.vm06.qbbldl (mgr.14229) 28 : cephadm [INF] Reconfiguring mon.vm06 (unknown last config time)...
2026-04-15T13:34:27.162 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:26 vm06 bash[28114]: audit 2026-04-15T13:34:26.227977+0000 mon.vm06 (mon.0) 303 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "mon."} : dispatch
2026-04-15T13:34:27.162 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:26 vm06 bash[28114]: audit 2026-04-15T13:34:26.228534+0000 mon.vm06 (mon.0) 304 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "mon", "key": "public_network"} : dispatch
2026-04-15T13:34:27.162 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:26 vm06 bash[28114]: audit 2026-04-15T13:34:26.229006+0000 mon.vm06 (mon.0) 305 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:27.162 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:26 vm06 bash[28114]: cephadm 2026-04-15T13:34:26.229595+0000 mgr.vm06.qbbldl (mgr.14229) 29 : cephadm [INF] Reconfiguring daemon mon.vm06 on vm06
2026-04-15T13:34:27.162 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:26 vm06 bash[28114]: audit 2026-04-15T13:34:26.660290+0000 mon.vm06 (mon.0) 306 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:27.162 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:26 vm06 bash[28114]: audit 2026-04-15T13:34:26.666417+0000 mon.vm06 (mon.0) 307 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:27.162 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:26 vm06 bash[28114]: audit 2026-04-15T13:34:26.667429+0000 mon.vm06 (mon.0) 308 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm06", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch
2026-04-15T13:34:27.162 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:26 vm06 bash[28114]: audit 2026-04-15T13:34:26.667999+0000 mon.vm06 (mon.0) 309 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:27.162 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:26 vm06 bash[28114]: audit 2026-04-15T13:34:26.939878+0000 mon.vm06 (mon.0) 310 : audit [DBG] from='client.? 192.168.123.106:0/773747586' entity='client.admin' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:27.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:26 vm09 bash[34466]: cephadm 2026-04-15T13:34:25.820493+0000 mgr.vm06.qbbldl (mgr.14229) 26 : cephadm [INF] Reconfiguring mgr.vm06.qbbldl (unknown last config time)...
2026-04-15T13:34:27.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:26 vm09 bash[34466]: cephadm 2026-04-15T13:34:25.822003+0000 mgr.vm06.qbbldl (mgr.14229) 27 : cephadm [INF] Reconfiguring daemon mgr.vm06.qbbldl on vm06
2026-04-15T13:34:27.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:26 vm09 bash[34466]: audit 2026-04-15T13:34:26.219729+0000 mon.vm06 (mon.0) 300 : audit [DBG] from='client.? 192.168.123.109:0/2729844555' entity='client.admin' cmd={"prefix": "mon dump", "format": "json"} : dispatch
2026-04-15T13:34:27.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:26 vm09 bash[34466]: audit 2026-04-15T13:34:26.222864+0000 mon.vm06 (mon.0) 301 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:27.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:26 vm09 bash[34466]: audit 2026-04-15T13:34:26.227058+0000 mon.vm06 (mon.0) 302 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:27.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:26 vm09 bash[34466]: cephadm 2026-04-15T13:34:26.227768+0000 mgr.vm06.qbbldl (mgr.14229) 28 : cephadm [INF] Reconfiguring mon.vm06 (unknown last config time)...
2026-04-15T13:34:27.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:26 vm09 bash[34466]: audit 2026-04-15T13:34:26.227977+0000 mon.vm06 (mon.0) 303 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "mon."} : dispatch
2026-04-15T13:34:27.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:26 vm09 bash[34466]: audit 2026-04-15T13:34:26.228534+0000 mon.vm06 (mon.0) 304 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "mon", "key": "public_network"} : dispatch
2026-04-15T13:34:27.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:26 vm09 bash[34466]: audit 2026-04-15T13:34:26.229006+0000 mon.vm06 (mon.0) 305 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:27.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:26 vm09 bash[34466]: cephadm 2026-04-15T13:34:26.229595+0000 mgr.vm06.qbbldl (mgr.14229) 29 : cephadm [INF] Reconfiguring daemon mon.vm06 on vm06
2026-04-15T13:34:27.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:26 vm09 bash[34466]: audit 2026-04-15T13:34:26.660290+0000 mon.vm06 (mon.0) 306 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:27.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:26 vm09 bash[34466]: audit 2026-04-15T13:34:26.666417+0000 mon.vm06 (mon.0) 307 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:27.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:26 vm09 bash[34466]: audit 2026-04-15T13:34:26.667429+0000 mon.vm06 (mon.0) 308 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm06", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch
2026-04-15T13:34:27.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:26 vm09 bash[34466]: audit 2026-04-15T13:34:26.667999+0000 mon.vm06 (mon.0) 309 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:27.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:26 vm09 bash[34466]: audit 2026-04-15T13:34:26.939878+0000 mon.vm06 (mon.0) 310 : audit [DBG] from='client.? 192.168.123.106:0/773747586' entity='client.admin' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:27.408 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:WHO MASK LEVEL OPTION VALUE RO
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global dev auth_debug true
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global basic container_image harbor.clyso.com/custom-ceph/ceph/ceph@sha256:b4cb326006c035fcaccf517a7733ba26fcc96dafbf1f00ae8ac89d843a9451a9 *
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global dev debug_asserts_on_shutdown true
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global basic log_to_file true
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global basic log_to_journald false
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global basic log_to_stderr false
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global advanced mon_allow_pool_delete true
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global advanced mon_clock_drift_allowed 1.000000
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global advanced mon_cluster_log_to_file true
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global advanced mon_max_pg_per_osd 10000
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global advanced mon_pg_warn_max_object_skew 0.000000
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global advanced mon_warn_on_crush_straw_calc_version_zero false
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global advanced mon_warn_on_legacy_crush_tunables false
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global advanced mon_warn_on_osd_down_out_interval_zero false
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global dev mon_warn_on_pool_pg_num_not_power_of_two false
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global advanced mon_warn_on_too_few_osds false
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global dev ms_die_on_bug true
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global dev ms_die_on_old_message true
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global advanced osd_pool_default_erasure_code_profile plugin=isa technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global advanced osd_pool_default_pg_autoscale_mode off
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:global advanced public_network 192.168.123.0/24,192.168.123.1/32 *
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mon advanced auth_allow_insecure_global_id_reclaim false
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mon advanced auth_mon_ticket_ttl 660.000000
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mon advanced auth_service_ticket_ttl 240.000000
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mon advanced debug_mon 20/20
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mon advanced debug_ms 1/1
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mon advanced debug_paxos 20/20
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mon advanced mon_data_avail_warn 5
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mon advanced mon_mgr_mkfs_grace 240
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mon dev mon_osd_prime_pg_temp true
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mon advanced mon_osd_reporter_subtree_level osd
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mon advanced mon_reweight_min_bytes_per_osd 10
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mon advanced mon_reweight_min_pgs_per_osd 4
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mon advanced mon_warn_on_insecure_global_id_reclaim false
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mon advanced mon_warn_on_insecure_global_id_reclaim_allowed false
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mgr advanced debug_mgr 20/20
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mgr advanced debug_ms 1/1
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mgr advanced mgr/cephadm/allow_ptrace true *
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mgr advanced mgr/cephadm/container_init True *
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mgr advanced mgr/cephadm/migration_current 7 *
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mgr advanced mgr/dashboard/GRAFANA_API_SSL_VERIFY false *
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mgr advanced mgr/dashboard/ssl_server_port 8443 *
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mgr advanced mgr/orchestrator/orchestrator cephadm
2026-04-15T13:34:27.810 INFO:teuthology.orchestra.run.vm06.stdout:mgr advanced mon_reweight_min_bytes_per_osd 10
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:mgr advanced mon_reweight_min_pgs_per_osd 4
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd dev bdev_debug_aio true
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd advanced debug_ms 1/1
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd advanced debug_osd 20/20
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd dev osd_debug_misdirected_ops true
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd dev osd_debug_op_order true
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd dev osd_debug_pg_log_writeout true
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd dev osd_debug_shutdown true
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd dev osd_debug_verify_cached_snaps true
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd dev osd_debug_verify_missing_on_start true
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd dev osd_debug_verify_stray_on_activate true
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd advanced osd_deep_scrub_update_digest_min_age 30
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd basic osd_mclock_iops_capacity_threshold_hdd 49000.000000
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd advanced osd_mclock_profile high_recovery_ops
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd advanced osd_memory_target_autotune true
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd advanced osd_op_queue debug_random *
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd advanced osd_op_queue_cut_off debug_random *
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd advanced osd_recover_clone_overlap true
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd advanced osd_recovery_max_chunk 1048576
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd advanced osd_scrub_load_threshold 5.000000
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd advanced osd_scrub_max_interval 600.000000
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:osd advanced osd_shutdown_pgref_assert true
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:client.rgw advanced rgw_cache_enabled true
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:client.rgw advanced rgw_enable_ops_log true
2026-04-15T13:34:27.811 INFO:teuthology.orchestra.run.vm06.stdout:client.rgw advanced rgw_enable_usage_log true
2026-04-15T13:34:27.912 INFO:tasks.cephadm:Deploying OSDs...
2026-04-15T13:34:27.912 DEBUG:teuthology.orchestra.run.vm06:> set -ex
2026-04-15T13:34:27.912 DEBUG:teuthology.orchestra.run.vm06:> dd if=/scratch_devs of=/dev/stdout
2026-04-15T13:34:27.915 DEBUG:teuthology.misc:devs=['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1']
2026-04-15T13:34:27.916 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/nvme0n1
2026-04-15T13:34:27.966 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/nvme0n1
2026-04-15T13:34:27.966 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-04-15T13:34:27.966 INFO:teuthology.orchestra.run.vm06.stdout:Device: 5h/5d Inode: 895 Links: 1 Device type: 103,1
2026-04-15T13:34:27.966 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-04-15T13:34:27.966 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-04-15 13:31:28.640122448 +0000
2026-04-15T13:34:27.966 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-04-15 13:31:28.596122448 +0000
2026-04-15T13:34:27.966 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-04-15 13:31:28.596122448 +0000
2026-04-15T13:34:27.966 INFO:teuthology.orchestra.run.vm06.stdout: Birth: -
2026-04-15T13:34:27.966 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/nvme0n1 of=/dev/null count=1
2026-04-15T13:34:28.015 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in
2026-04-15T13:34:28.015 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out
2026-04-15T13:34:28.015 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.00017133 s, 3.0 MB/s
2026-04-15T13:34:28.015 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/nvme0n1
2026-04-15T13:34:28.069 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/nvme1n1
2026-04-15T13:34:28.118 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/nvme1n1
2026-04-15T13:34:28.118 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-04-15T13:34:28.118 INFO:teuthology.orchestra.run.vm06.stdout:Device: 5h/5d Inode: 904 Links: 1 Device type: 103,3
2026-04-15T13:34:28.118 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-04-15T13:34:28.118 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-04-15 13:31:28.956122448 +0000
2026-04-15T13:34:28.118 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-04-15 13:31:28.916122448 +0000
2026-04-15T13:34:28.118 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-04-15 13:31:28.916122448 +0000
2026-04-15T13:34:28.118 INFO:teuthology.orchestra.run.vm06.stdout: Birth: -
2026-04-15T13:34:28.118 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/nvme1n1 of=/dev/null count=1
2026-04-15T13:34:28.170 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:28 vm06 bash[28114]: cephadm 2026-04-15T13:34:26.667230+0000 mgr.vm06.qbbldl (mgr.14229) 30 : cephadm [INF] Reconfiguring crash.vm06 (monmap changed)...
2026-04-15T13:34:28.170 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:28 vm06 bash[28114]: cephadm 2026-04-15T13:34:26.668532+0000 mgr.vm06.qbbldl (mgr.14229) 31 : cephadm [INF] Reconfiguring daemon crash.vm06 on vm06
2026-04-15T13:34:28.170 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:28 vm06 bash[28114]: audit 2026-04-15T13:34:27.138879+0000 mon.vm06 (mon.0) 311 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:28.170 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:28 vm06 bash[28114]: audit 2026-04-15T13:34:27.142652+0000 mon.vm06 (mon.0) 312 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:28.170 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:28 vm06 bash[28114]: cephadm 2026-04-15T13:34:27.143294+0000 mgr.vm06.qbbldl (mgr.14229) 32 : cephadm [INF] Reconfiguring grafana.vm06 deps ['secure_monitoring_stack:False'] -> ['prometheus.vm06', 'secure_monitoring_stack:False'] (diff {'prometheus.vm06'})
2026-04-15T13:34:28.170 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:28 vm06 bash[28114]: audit 2026-04-15T13:34:27.344266+0000 mon.vm06 (mon.0) 313 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"} : dispatch
2026-04-15T13:34:28.170 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:28 vm06 bash[28114]: audit 2026-04-15T13:34:27.807786+0000 mon.vm06 (mon.0) 314 : audit [DBG] from='client.? 192.168.123.106:0/1734267297' entity='client.admin' cmd={"prefix": "config dump"} : dispatch
2026-04-15T13:34:28.170 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:28 vm06 bash[28114]: audit 2026-04-15T13:34:28.104413+0000 mon.vm06 (mon.0) 315 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:28.170 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:28 vm06 bash[28114]: audit 2026-04-15T13:34:28.110742+0000 mon.vm06 (mon.0) 316 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:28.170 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:28 vm06 bash[28114]: audit 2026-04-15T13:34:28.112116+0000 mon.vm06 (mon.0) 317 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm06", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch
2026-04-15T13:34:28.170 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:28 vm06 bash[28114]: audit 2026-04-15T13:34:28.112983+0000 mon.vm06 (mon.0) 318 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:28.171 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:28 vm06 
bash[28114]: audit 2026-04-15T13:34:28.112983+0000 mon.vm06 (mon.0) 318 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:28.171 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in 2026-04-15T13:34:28.171 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out 2026-04-15T13:34:28.171 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000149149 s, 3.4 MB/s 2026-04-15T13:34:28.173 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/nvme1n1 2026-04-15T13:34:28.221 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/nvme2n1 2026-04-15T13:34:28.267 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/nvme2n1 2026-04-15T13:34:28.267 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file 2026-04-15T13:34:28.267 INFO:teuthology.orchestra.run.vm06.stdout:Device: 5h/5d Inode: 915 Links: 1 Device type: 103,5 2026-04-15T13:34:28.267 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-15T13:34:28.267 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-04-15 13:31:29.264122448 +0000 2026-04-15T13:34:28.267 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-04-15 13:31:29.220122448 +0000 2026-04-15T13:34:28.267 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-04-15 13:31:29.220122448 +0000 2026-04-15T13:34:28.267 INFO:teuthology.orchestra.run.vm06.stdout: Birth: - 2026-04-15T13:34:28.267 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/nvme2n1 of=/dev/null count=1 2026-04-15T13:34:28.319 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in 2026-04-15T13:34:28.319 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out 2026-04-15T13:34:28.319 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000259316 s, 2.0 MB/s 2026-04-15T13:34:28.320 DEBUG:teuthology.orchestra.run.vm06:> ! mount | grep -v devtmpfs | grep -q /dev/nvme2n1 2026-04-15T13:34:28.369 DEBUG:teuthology.orchestra.run.vm06:> stat /dev/nvme3n1 2026-04-15T13:34:28.414 INFO:teuthology.orchestra.run.vm06.stdout: File: /dev/nvme3n1 2026-04-15T13:34:28.414 INFO:teuthology.orchestra.run.vm06.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file 2026-04-15T13:34:28.414 INFO:teuthology.orchestra.run.vm06.stdout:Device: 5h/5d Inode: 925 Links: 1 Device type: 103,7 2026-04-15T13:34:28.414 INFO:teuthology.orchestra.run.vm06.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-15T13:34:28.414 INFO:teuthology.orchestra.run.vm06.stdout:Access: 2026-04-15 13:31:29.572122448 +0000 2026-04-15T13:34:28.414 INFO:teuthology.orchestra.run.vm06.stdout:Modify: 2026-04-15 13:31:29.528122448 +0000 2026-04-15T13:34:28.414 INFO:teuthology.orchestra.run.vm06.stdout:Change: 2026-04-15 13:31:29.528122448 +0000 2026-04-15T13:34:28.414 INFO:teuthology.orchestra.run.vm06.stdout: Birth: - 2026-04-15T13:34:28.414 DEBUG:teuthology.orchestra.run.vm06:> sudo dd if=/dev/nvme3n1 of=/dev/null count=1 2026-04-15T13:34:28.462 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records in 2026-04-15T13:34:28.462 INFO:teuthology.orchestra.run.vm06.stderr:1+0 records out 2026-04-15T13:34:28.462 INFO:teuthology.orchestra.run.vm06.stderr:512 bytes copied, 0.000169627 s, 3.0 MB/s 2026-04-15T13:34:28.463 DEBUG:teuthology.orchestra.run.vm06:> ! 
mount | grep -v devtmpfs | grep -q /dev/nvme3n1 2026-04-15T13:34:28.508 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-04-15T13:34:28.508 DEBUG:teuthology.orchestra.run.vm09:> dd if=/scratch_devs of=/dev/stdout 2026-04-15T13:34:28.512 DEBUG:teuthology.misc:devs=['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1', '/dev/nvme3n1'] 2026-04-15T13:34:28.512 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/nvme0n1 2026-04-15T13:34:28.557 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/nvme0n1 2026-04-15T13:34:28.558 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file 2026-04-15T13:34:28.558 INFO:teuthology.orchestra.run.vm09.stdout:Device: 5h/5d Inode: 896 Links: 1 Device type: 103,1 2026-04-15T13:34:28.558 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk) 2026-04-15T13:34:28.558 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-04-15 13:31:50.183559268 +0000 2026-04-15T13:34:28.558 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-04-15 13:31:50.139559268 +0000 2026-04-15T13:34:28.558 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-04-15 13:31:50.139559268 +0000 2026-04-15T13:34:28.558 INFO:teuthology.orchestra.run.vm09.stdout: Birth: - 2026-04-15T13:34:28.558 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/nvme0n1 of=/dev/null count=1 2026-04-15T13:34:28.607 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:28 vm09 bash[34466]: cephadm 2026-04-15T13:34:26.667230+0000 mgr.vm06.qbbldl (mgr.14229) 30 : cephadm [INF] Reconfiguring crash.vm06 (monmap changed)... 2026-04-15T13:34:28.607 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:28 vm09 bash[34466]: cephadm 2026-04-15T13:34:26.667230+0000 mgr.vm06.qbbldl (mgr.14229) 30 : cephadm [INF] Reconfiguring crash.vm06 (monmap changed)... 
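The block above is the harness's scratch-device validation, repeated per host for each entry read from /scratch_devs: stat the node, read one 512-byte sector with dd, and assert the device does not appear in the mount table (devtmpfs entries excluded). A minimal local sketch of the same three probes in Python follows; the helper name and loop structure are illustrative, not teuthology's actual code.

import subprocess

SCRATCH_DEVS = ["/dev/nvme0n1", "/dev/nvme1n1", "/dev/nvme2n1", "/dev/nvme3n1"]

def device_is_usable(dev: str) -> bool:
    """Mirror the three probes visible in the log for each scratch device."""
    checks = [
        ["stat", dev],                                           # node exists
        ["sudo", "dd", f"if={dev}", "of=/dev/null", "count=1"],  # first sector readable
        ["bash", "-c", f"! mount | grep -v devtmpfs | grep -q {dev}"],  # not mounted
    ]
    return all(subprocess.run(c, capture_output=True).returncode == 0
               for c in checks)

for dev in SCRATCH_DEVS:
    print(dev, "usable" if device_is_usable(dev) else "unusable")

All three checks run under `set -ex` semantics in the log, so any failing probe aborts the OSD deployment before a spec is ever applied.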
2026-04-15T13:34:28.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:28 vm09 bash[34466]: cephadm 2026-04-15T13:34:26.668532+0000 mgr.vm06.qbbldl (mgr.14229) 31 : cephadm [INF] Reconfiguring daemon crash.vm06 on vm06
2026-04-15T13:34:28.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:28 vm09 bash[34466]: audit 2026-04-15T13:34:27.138879+0000 mon.vm06 (mon.0) 311 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:28.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:28 vm09 bash[34466]: audit 2026-04-15T13:34:27.142652+0000 mon.vm06 (mon.0) 312 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:28.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:28 vm09 bash[34466]: cephadm 2026-04-15T13:34:27.143294+0000 mgr.vm06.qbbldl (mgr.14229) 32 : cephadm [INF] Reconfiguring grafana.vm06 deps ['secure_monitoring_stack:False'] -> ['prometheus.vm06', 'secure_monitoring_stack:False'] (diff {'prometheus.vm06'})
2026-04-15T13:34:28.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:28 vm09 bash[34466]: audit 2026-04-15T13:34:27.344266+0000 mon.vm06 (mon.0) 313 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"} : dispatch
2026-04-15T13:34:28.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:28 vm09 bash[34466]: audit 2026-04-15T13:34:27.807786+0000 mon.vm06 (mon.0) 314 : audit [DBG] from='client.? 192.168.123.106:0/1734267297' entity='client.admin' cmd={"prefix": "config dump"} : dispatch
2026-04-15T13:34:28.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:28 vm09 bash[34466]: audit 2026-04-15T13:34:28.104413+0000 mon.vm06 (mon.0) 315 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:28.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:28 vm09 bash[34466]: audit 2026-04-15T13:34:28.110742+0000 mon.vm06 (mon.0) 316 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:28.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:28 vm09 bash[34466]: audit 2026-04-15T13:34:28.112116+0000 mon.vm06 (mon.0) 317 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm06", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch
2026-04-15T13:34:28.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:28 vm09 bash[34466]: audit 2026-04-15T13:34:28.112983+0000 mon.vm06 (mon.0) 318 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:28.608 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in
2026-04-15T13:34:28.608 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out
2026-04-15T13:34:28.608 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000207008 s, 2.5 MB/s
2026-04-15T13:34:28.609 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/nvme0n1
2026-04-15T13:34:28.658 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/nvme1n1
2026-04-15T13:34:28.705 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/nvme1n1
2026-04-15T13:34:28.705 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-04-15T13:34:28.705 INFO:teuthology.orchestra.run.vm09.stdout:Device: 5h/5d Inode: 905 Links: 1 Device type: 103,3
2026-04-15T13:34:28.705 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-04-15T13:34:28.705 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-04-15 13:31:50.511559268 +0000
2026-04-15T13:34:28.705 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-04-15 13:31:50.467559268 +0000
2026-04-15T13:34:28.705 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-04-15 13:31:50.467559268 +0000
2026-04-15T13:34:28.705 INFO:teuthology.orchestra.run.vm09.stdout: Birth: -
2026-04-15T13:34:28.705 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/nvme1n1 of=/dev/null count=1
2026-04-15T13:34:28.754 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in
2026-04-15T13:34:28.754 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out
2026-04-15T13:34:28.754 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000216064 s, 2.4 MB/s
2026-04-15T13:34:28.755 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/nvme1n1
2026-04-15T13:34:28.802 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/nvme2n1
2026-04-15T13:34:28.848 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/nvme2n1
2026-04-15T13:34:28.848 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-04-15T13:34:28.848 INFO:teuthology.orchestra.run.vm09.stdout:Device: 5h/5d Inode: 915 Links: 1 Device type: 103,5
2026-04-15T13:34:28.848 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-04-15T13:34:28.848 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-04-15 13:31:50.819559268 +0000
2026-04-15T13:34:28.848 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-04-15 13:31:50.775559268 +0000
2026-04-15T13:34:28.848 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-04-15 13:31:50.775559268 +0000
2026-04-15T13:34:28.848 INFO:teuthology.orchestra.run.vm09.stdout: Birth: -
2026-04-15T13:34:28.849 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/nvme2n1 of=/dev/null count=1
2026-04-15T13:34:28.896 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in
2026-04-15T13:34:28.896 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out
2026-04-15T13:34:28.897 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000189976 s, 2.7 MB/s
2026-04-15T13:34:28.897 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/nvme2n1
2026-04-15T13:34:28.946 DEBUG:teuthology.orchestra.run.vm09:> stat /dev/nvme3n1
2026-04-15T13:34:28.992 INFO:teuthology.orchestra.run.vm09.stdout: File: /dev/nvme3n1
2026-04-15T13:34:28.992 INFO:teuthology.orchestra.run.vm09.stdout: Size: 0 Blocks: 0 IO Block: 4096 block special file
2026-04-15T13:34:28.993 INFO:teuthology.orchestra.run.vm09.stdout:Device: 5h/5d Inode: 925 Links: 1 Device type: 103,7
2026-04-15T13:34:28.993 INFO:teuthology.orchestra.run.vm09.stdout:Access: (0660/brw-rw----) Uid: ( 0/ root) Gid: ( 6/ disk)
2026-04-15T13:34:28.993 INFO:teuthology.orchestra.run.vm09.stdout:Access: 2026-04-15 13:31:51.131559268 +0000
2026-04-15T13:34:28.993 INFO:teuthology.orchestra.run.vm09.stdout:Modify: 2026-04-15 13:31:51.083559268 +0000
2026-04-15T13:34:28.993 INFO:teuthology.orchestra.run.vm09.stdout:Change: 2026-04-15 13:31:51.083559268 +0000
2026-04-15T13:34:28.993 INFO:teuthology.orchestra.run.vm09.stdout: Birth: -
2026-04-15T13:34:28.993 DEBUG:teuthology.orchestra.run.vm09:> sudo dd if=/dev/nvme3n1 of=/dev/null count=1
2026-04-15T13:34:29.041 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records in
2026-04-15T13:34:29.041 INFO:teuthology.orchestra.run.vm09.stderr:1+0 records out
2026-04-15T13:34:29.042 INFO:teuthology.orchestra.run.vm09.stderr:512 bytes copied, 0.000155711 s, 3.3 MB/s
2026-04-15T13:34:29.042 DEBUG:teuthology.orchestra.run.vm09:> ! mount | grep -v devtmpfs | grep -q /dev/nvme3n1
2026-04-15T13:34:29.090 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch apply osd --all-available-devices
2026-04-15T13:34:29.258 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:29 vm06 bash[28114]: audit 2026-04-15T13:34:27.344679+0000 mgr.vm06.qbbldl (mgr.14229) 33 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-04-15T13:34:29.258 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:29 vm06 bash[28114]: cephadm 2026-04-15T13:34:27.351516+0000 mgr.vm06.qbbldl (mgr.14229) 34 : cephadm [INF] Reconfiguring daemon grafana.vm06 on vm06
2026-04-15T13:34:29.258 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:29 vm06 bash[28114]: cephadm 2026-04-15T13:34:28.111500+0000 mgr.vm06.qbbldl (mgr.14229) 35 : cephadm [INF] Reconfiguring ceph-exporter.vm06 (monmap changed)...
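With every scratch device validated, OSD deployment is a single declarative call: `ceph orch apply osd --all-available-devices`, issued through a cephadm shell container rather than daemon by daemon. A sketch of the same invocation from Python, reusing the image, fsid, and cephadm path seen in the log; the wrapper function itself is illustrative, not part of tasks.cephadm.

import subprocess

IMAGE = "harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5"
FSID = "75e42418-38cf-11f1-9300-4fe77ac4445b"

def cephadm_shell(*ceph_args: str) -> str:
    """Run one ceph CLI command inside a cephadm shell container, as the log does."""
    cmd = [
        "sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE,
        "shell", "-c", "/etc/ceph/ceph.conf",
        "-k", "/etc/ceph/ceph.client.admin.keyring",
        "--fsid", FSID, "--", "ceph", *ceph_args,
    ]
    return subprocess.run(cmd, check=True, capture_output=True, text=True).stdout

# Ask the orchestrator to consume every clean, unmounted device on every host:
print(cephadm_shell("orch", "apply", "osd", "--all-available-devices"))

The mgr audit entry that follows shortly ("orch apply osd", "all_available_devices": true) is this command arriving at the mon-mgr target; the actual OSD creation then happens asynchronously under the saved osd.all-available-devices service spec.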
2026-04-15T13:34:29.258 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:29 vm06 bash[28114]: cephadm 2026-04-15T13:34:28.113778+0000 mgr.vm06.qbbldl (mgr.14229) 36 : cephadm [INF] Reconfiguring daemon ceph-exporter.vm06 on vm06
2026-04-15T13:34:29.258 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:29 vm06 bash[28114]: cluster 2026-04-15T13:34:28.166143+0000 mgr.vm06.qbbldl (mgr.14229) 37 : cluster [DBG] pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:34:29.258 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:29 vm06 bash[28114]: audit 2026-04-15T13:34:28.680594+0000 mon.vm06 (mon.0) 319 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:29.258 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:29 vm06 bash[28114]: audit 2026-04-15T13:34:28.684328+0000 mon.vm06 (mon.0) 320 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:29.258 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:29 vm06 bash[28114]: cluster 2026-04-15T13:34:28.837710+0000 mon.vm06 (mon.0) 321 : cluster [DBG] Standby manager daemon vm09.kpawde started
2026-04-15T13:34:29.258 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:29 vm06 bash[28114]: audit 2026-04-15T13:34:28.840598+0000 mon.vm06 (mon.0) 322 : audit [DBG] from='mgr.? 192.168.123.109:0/2614618704' entity='mgr.vm09.kpawde' cmd={"prefix": "config-key get", "key": "mgr/dashboard/vm09.kpawde/crt"} : dispatch
2026-04-15T13:34:29.258 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:29 vm06 bash[28114]: audit 2026-04-15T13:34:28.841165+0000 mon.vm06 (mon.0) 323 : audit [DBG] from='mgr.? 192.168.123.109:0/2614618704' entity='mgr.vm09.kpawde' cmd={"prefix": "config-key get", "key": "mgr/dashboard/crt"} : dispatch
2026-04-15T13:34:29.258 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:29 vm06 bash[28114]: audit 2026-04-15T13:34:28.843489+0000 mon.vm06 (mon.0) 324 : audit [DBG] from='mgr.? 192.168.123.109:0/2614618704' entity='mgr.vm09.kpawde' cmd={"prefix": "config-key get", "key": "mgr/dashboard/vm09.kpawde/key"} : dispatch
2026-04-15T13:34:29.258 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:29 vm06 bash[28114]: audit 2026-04-15T13:34:28.844784+0000 mon.vm06 (mon.0) 325 : audit [DBG] from='mgr.? 192.168.123.109:0/2614618704' entity='mgr.vm09.kpawde' cmd={"prefix": "config-key get", "key": "mgr/dashboard/key"} : dispatch
2026-04-15T13:34:29.403 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm09/config
2026-04-15T13:34:29.425 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:29 vm09 bash[34466]: audit 2026-04-15T13:34:27.344679+0000 mgr.vm06.qbbldl (mgr.14229) 33 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-ssl-verify", "value": "false"}]: dispatch
2026-04-15T13:34:29.425 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:29 vm09 bash[34466]: cephadm 2026-04-15T13:34:27.351516+0000 mgr.vm06.qbbldl (mgr.14229) 34 : cephadm [INF] Reconfiguring daemon grafana.vm06 on vm06
2026-04-15T13:34:29.425 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:29 vm09 bash[34466]: cephadm 2026-04-15T13:34:28.111500+0000 mgr.vm06.qbbldl (mgr.14229) 35 : cephadm [INF] Reconfiguring ceph-exporter.vm06 (monmap changed)...
2026-04-15T13:34:29.425 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:29 vm09 bash[34466]: cephadm 2026-04-15T13:34:28.113778+0000 mgr.vm06.qbbldl (mgr.14229) 36 : cephadm [INF] Reconfiguring daemon ceph-exporter.vm06 on vm06
2026-04-15T13:34:29.425 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:29 vm09 bash[34466]: cluster 2026-04-15T13:34:28.166143+0000 mgr.vm06.qbbldl (mgr.14229) 37 : cluster [DBG] pgmap v3: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:34:29.425 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:29 vm09 bash[34466]: audit 2026-04-15T13:34:28.680594+0000 mon.vm06 (mon.0) 319 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:29.425 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:29 vm09 bash[34466]: audit 2026-04-15T13:34:28.684328+0000 mon.vm06 (mon.0) 320 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:29.425 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:29 vm09 bash[34466]: cluster 2026-04-15T13:34:28.837710+0000 mon.vm06 (mon.0) 321 : cluster [DBG] Standby manager daemon vm09.kpawde started
2026-04-15T13:34:29.426 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:29 vm09 bash[34466]: audit 2026-04-15T13:34:28.840598+0000 mon.vm06 (mon.0) 322 : audit [DBG] from='mgr.? 192.168.123.109:0/2614618704' entity='mgr.vm09.kpawde' cmd={"prefix": "config-key get", "key": "mgr/dashboard/vm09.kpawde/crt"} : dispatch
2026-04-15T13:34:29.426 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:29 vm09 bash[34466]: audit 2026-04-15T13:34:28.841165+0000 mon.vm06 (mon.0) 323 : audit [DBG] from='mgr.? 192.168.123.109:0/2614618704' entity='mgr.vm09.kpawde' cmd={"prefix": "config-key get", "key": "mgr/dashboard/crt"} : dispatch
2026-04-15T13:34:29.426 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:29 vm09 bash[34466]: audit 2026-04-15T13:34:28.843489+0000 mon.vm06 (mon.0) 324 : audit [DBG] from='mgr.? 192.168.123.109:0/2614618704' entity='mgr.vm09.kpawde' cmd={"prefix": "config-key get", "key": "mgr/dashboard/vm09.kpawde/key"} : dispatch
2026-04-15T13:34:29.426 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:29 vm09 bash[34466]: audit 2026-04-15T13:34:28.844784+0000 mon.vm06 (mon.0) 325 : audit [DBG] from='mgr.? 192.168.123.109:0/2614618704' entity='mgr.vm09.kpawde' cmd={"prefix": "config-key get", "key": "mgr/dashboard/key"} : dispatch
2026-04-15T13:34:29.791 INFO:teuthology.orchestra.run.vm09.stdout:Scheduled osd.all-available-devices update...
2026-04-15T13:34:29.876 INFO:tasks.cephadm:Waiting for 8 OSDs to come up...
2026-04-15T13:34:29.876 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd stat -f json
2026-04-15T13:34:30.137 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:34:30.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:30 vm09 bash[34466]: cephadm 2026-04-15T13:34:28.685066+0000 mgr.vm06.qbbldl (mgr.14229) 38 : cephadm [INF] Reconfiguring alertmanager.vm06 deps ['mgr.vm06.qbbldl', 'secure_monitoring_stack:False'] -> ['alertmanager.vm06', 'mgr.vm06.qbbldl', 'mgr.vm09.kpawde', 'secure_monitoring_stack:False'] (diff {'alertmanager.vm06', 'mgr.vm09.kpawde'})
2026-04-15T13:34:30.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:30 vm09 bash[34466]: cephadm 2026-04-15T13:34:28.688476+0000 mgr.vm06.qbbldl (mgr.14229) 39 : cephadm [INF] Reconfiguring daemon alertmanager.vm06 on vm06
2026-04-15T13:34:30.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:30 vm09 bash[34466]: audit 2026-04-15T13:34:29.386094+0000 mon.vm06 (mon.0) 326 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:30.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:30 vm09 bash[34466]: audit 2026-04-15T13:34:29.390803+0000 mon.vm06 (mon.0) 327 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:30.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:30 vm09 bash[34466]: audit 2026-04-15T13:34:29.391760+0000 mon.vm06 (mon.0) 328 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch
2026-04-15T13:34:30.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:30 vm09 bash[34466]: audit 2026-04-15T13:34:29.392291+0000 mon.vm06 (mon.0) 329 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:30.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:30 vm09 bash[34466]: cluster 2026-04-15T13:34:29.693198+0000 mon.vm06 (mon.0) 330 : cluster [DBG] mgrmap e18: vm06.qbbldl(active, since 21s), standbys: vm09.kpawde
2026-04-15T13:34:30.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:30 vm09 bash[34466]: audit 2026-04-15T13:34:29.693900+0000 mon.vm06 (mon.0) 331 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mgr metadata", "who": "vm09.kpawde", "id": "vm09.kpawde"} : dispatch
2026-04-15T13:34:30.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:30 vm09 bash[34466]: audit 2026-04-15T13:34:29.792462+0000 mon.vm06 (mon.0) 332 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:30.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:30 vm09 bash[34466]: audit 2026-04-15T13:34:29.811763+0000 mon.vm06 (mon.0) 333 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:30.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:30 vm09 bash[34466]: audit 2026-04-15T13:34:29.815592+0000 mon.vm06 (mon.0) 334 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:30.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:30 vm09 bash[34466]: audit 2026-04-15T13:34:29.816260+0000 mon.vm06 (mon.0) 335 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm09.kpawde", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch
2026-04-15T13:34:30.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:30 vm09 bash[34466]: audit 2026-04-15T13:34:29.816839+0000 mon.vm06 (mon.0) 336 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mgr services"} : dispatch
2026-04-15T13:34:30.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:30 vm09 bash[34466]: audit 2026-04-15T13:34:29.817269+0000 mon.vm06 (mon.0) 337 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:30.458 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:30 vm06 bash[28114]: cephadm 2026-04-15T13:34:28.685066+0000 mgr.vm06.qbbldl (mgr.14229) 38 : cephadm [INF] Reconfiguring alertmanager.vm06 deps ['mgr.vm06.qbbldl', 'secure_monitoring_stack:False'] -> ['alertmanager.vm06', 'mgr.vm06.qbbldl', 'mgr.vm09.kpawde', 'secure_monitoring_stack:False'] (diff {'alertmanager.vm06', 'mgr.vm09.kpawde'})
2026-04-15T13:34:30.458 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:30 vm06 bash[28114]: cephadm 2026-04-15T13:34:28.688476+0000 mgr.vm06.qbbldl (mgr.14229) 39 : cephadm [INF] Reconfiguring daemon alertmanager.vm06 on vm06
2026-04-15T13:34:30.458 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:30 vm06 bash[28114]: audit 2026-04-15T13:34:29.386094+0000 mon.vm06 (mon.0) 326 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:30.458 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:30 vm06 bash[28114]: audit 2026-04-15T13:34:29.390803+0000 mon.vm06 (mon.0) 327 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:30.459 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:30 vm06 bash[28114]: audit 2026-04-15T13:34:29.391760+0000 mon.vm06 (mon.0) 328 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.crash.vm09", "caps": ["mon", "profile crash", "mgr", "profile crash"]} : dispatch
2026-04-15T13:34:30.459 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:30 vm06 bash[28114]: audit 2026-04-15T13:34:29.392291+0000 mon.vm06 (mon.0) 329 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:30.459 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:30 vm06 bash[28114]: cluster 2026-04-15T13:34:29.693198+0000 mon.vm06 (mon.0) 330 : cluster [DBG] mgrmap e18: vm06.qbbldl(active, since 21s), standbys: vm09.kpawde
2026-04-15T13:34:30.459 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:30 vm06 bash[28114]: audit 2026-04-15T13:34:29.693900+0000 mon.vm06 (mon.0) 331 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mgr metadata", "who": "vm09.kpawde", "id": "vm09.kpawde"} : dispatch
2026-04-15T13:34:30.459 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:30 vm06 bash[28114]: audit 2026-04-15T13:34:29.792462+0000 mon.vm06 (mon.0) 332 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:30.459 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:30 vm06 bash[28114]: audit 2026-04-15T13:34:29.811763+0000 mon.vm06 (mon.0) 333 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:30.459 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:30 vm06 bash[28114]: audit 2026-04-15T13:34:29.815592+0000 mon.vm06 (mon.0) 334 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:30.459 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:30 vm06 bash[28114]: audit 2026-04-15T13:34:29.816260+0000 mon.vm06 (mon.0) 335 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "mgr.vm09.kpawde", "caps": ["mon", "profile mgr", "osd", "allow *", "mds", "allow *"]} : dispatch
2026-04-15T13:34:30.459 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:30 vm06 bash[28114]: audit 2026-04-15T13:34:29.816839+0000 mon.vm06 (mon.0) 336 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mgr services"} : dispatch
2026-04-15T13:34:30.459 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:30 vm06 bash[28114]: audit 2026-04-15T13:34:29.817269+0000 mon.vm06 (mon.0) 337 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:30.522 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:34:30.622 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0}
2026-04-15T13:34:31.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: cephadm 2026-04-15T13:34:29.391543+0000 mgr.vm06.qbbldl (mgr.14229) 40 : cephadm [INF] Reconfiguring crash.vm09 (monmap changed)...
2026-04-15T13:34:31.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: cephadm 2026-04-15T13:34:29.392881+0000 mgr.vm06.qbbldl (mgr.14229) 41 : cephadm [INF] Reconfiguring daemon crash.vm09 on vm09
2026-04-15T13:34:31.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:29.787719+0000 mgr.vm06.qbbldl (mgr.14229) 42 : audit [DBG] from='client.14274 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:34:31.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: cephadm 2026-04-15T13:34:29.788858+0000 mgr.vm06.qbbldl (mgr.14229) 43 : cephadm [INF] Marking host: vm06 for OSDSpec preview refresh.
2026-04-15T13:34:31.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: cephadm 2026-04-15T13:34:29.788881+0000 mgr.vm06.qbbldl (mgr.14229) 44 : cephadm [INF] Marking host: vm09 for OSDSpec preview refresh.
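At this point the spec is saved ("Scheduled osd.all-available-devices update...") but no OSD exists yet, so the first `ceph osd stat -f json` reports "num_osds":0 and tasks.cephadm settles into a poll until all 8 expected OSDs (two hosts x four scratch devices) are up. A sketch of such a wait loop, reusing the cephadm_shell helper sketched earlier; the timeout and interval here are assumptions for illustration, not the task's actual values.

import json
import time

def wait_for_osds(expected: int, timeout: float = 900.0, interval: float = 10.0) -> None:
    """Poll `ceph osd stat -f json` until the cluster reports the expected OSD count."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        stat = json.loads(cephadm_shell("osd", "stat", "-f", "json"))
        if stat["num_osds"] >= expected and stat["num_up_osds"] >= expected:
            return
        time.sleep(interval)
    raise TimeoutError(f"still waiting for {expected} OSDs after {timeout}s")

wait_for_osds(8)

While this poll spins, the mgr log below shows the orchestrator working through the apply: saving the osd.all-available-devices spec, refreshing OSDSpec previews per host, and reconfiguring daemons as the monmap settles.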
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: cephadm 2026-04-15T13:34:29.789042+0000 mgr.vm06.qbbldl (mgr.14229) 45 : cephadm [INF] Saving service osd.all-available-devices spec with placement *
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: cephadm 2026-04-15T13:34:29.816077+0000 mgr.vm06.qbbldl (mgr.14229) 46 : cephadm [INF] Reconfiguring mgr.vm09.kpawde (monmap changed)...
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: cephadm 2026-04-15T13:34:29.817749+0000 mgr.vm06.qbbldl (mgr.14229) 47 : cephadm [INF] Reconfiguring daemon mgr.vm09.kpawde on vm09
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: cluster 2026-04-15T13:34:30.166350+0000 mgr.vm06.qbbldl (mgr.14229) 48 : cluster [DBG] pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:30.222189+0000 mon.vm06 (mon.0) 338 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:30.226956+0000 mon.vm06 (mon.0) 339 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: cephadm 2026-04-15T13:34:30.227473+0000 mgr.vm06.qbbldl (mgr.14229) 49 : cephadm [INF] Reconfiguring ceph-exporter.vm09 (monmap changed)...
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:30.228828+0000 mon.vm06 (mon.0) 340 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:30.230054+0000 mon.vm06 (mon.0) 341 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: cephadm 2026-04-15T13:34:30.230957+0000 mgr.vm06.qbbldl (mgr.14229) 50 : cephadm [INF] Reconfiguring daemon ceph-exporter.vm09 on vm09
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:30.522806+0000 mon.vm06 (mon.0) 342 : audit [DBG] from='client.? 192.168.123.106:0/4266630847' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:30.763958+0000 mon.vm06 (mon.0) 343 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:30.767524+0000 mon.vm06 (mon.0) 344 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:30.768246+0000 mon.vm06 (mon.0) 345 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "mon."} : dispatch
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:30.768748+0000 mon.vm06 (mon.0) 346 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "mon", "key": "public_network"} : dispatch
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:30.769190+0000 mon.vm06 (mon.0) 347 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.149182+0000 mon.vm06 (mon.0) 348 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.153820+0000 mon.vm06 (mon.0) 349 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.161447+0000 mon.vm06 (mon.0) 350 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.165263+0000 mon.vm06 (mon.0) 351 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.167867+0000 mon.vm06 (mon.0) 352 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard get-prometheus-api-host"} : dispatch
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.169075+0000 mon.vm06 (mon.0) 353 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"} : dispatch
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.172374+0000 mon.vm06 (mon.0) 354 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.182332+0000 mon.vm06 (mon.0) 355 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
[INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.185920+0000 mon.vm06 (mon.0) 356 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.185920+0000 mon.vm06 (mon.0) 356 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.187599+0000 mon.vm06 (mon.0) 357 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard get-grafana-api-url"} : dispatch 2026-04-15T13:34:31.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.187599+0000 mon.vm06 (mon.0) 357 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard get-grafana-api-url"} : dispatch 2026-04-15T13:34:31.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.188397+0000 mon.vm06 (mon.0) 358 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"} : dispatch 2026-04-15T13:34:31.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.188397+0000 mon.vm06 (mon.0) 358 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"} : dispatch 2026-04-15T13:34:31.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.191048+0000 mon.vm06 (mon.0) 359 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.191048+0000 mon.vm06 (mon.0) 359 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.198079+0000 mon.vm06 (mon.0) 360 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.198079+0000 mon.vm06 (mon.0) 360 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.201892+0000 mon.vm06 (mon.0) 361 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.201892+0000 mon.vm06 (mon.0) 361 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.203392+0000 mon.vm06 (mon.0) 362 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' 
entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard get-alertmanager-api-host"} : dispatch 2026-04-15T13:34:31.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.203392+0000 mon.vm06 (mon.0) 362 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard get-alertmanager-api-host"} : dispatch 2026-04-15T13:34:31.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.204085+0000 mon.vm06 (mon.0) 363 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm06.local:9093"} : dispatch 2026-04-15T13:34:31.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.204085+0000 mon.vm06 (mon.0) 363 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm06.local:9093"} : dispatch 2026-04-15T13:34:31.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.206638+0000 mon.vm06 (mon.0) 364 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:31 vm09 bash[34466]: audit 2026-04-15T13:34:31.206638+0000 mon.vm06 (mon.0) 364 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: cephadm 2026-04-15T13:34:29.391543+0000 mgr.vm06.qbbldl (mgr.14229) 40 : cephadm [INF] Reconfiguring crash.vm09 (monmap changed)... 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: cephadm 2026-04-15T13:34:29.391543+0000 mgr.vm06.qbbldl (mgr.14229) 40 : cephadm [INF] Reconfiguring crash.vm09 (monmap changed)... 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: cephadm 2026-04-15T13:34:29.392881+0000 mgr.vm06.qbbldl (mgr.14229) 41 : cephadm [INF] Reconfiguring daemon crash.vm09 on vm09 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: cephadm 2026-04-15T13:34:29.392881+0000 mgr.vm06.qbbldl (mgr.14229) 41 : cephadm [INF] Reconfiguring daemon crash.vm09 on vm09 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:29.787719+0000 mgr.vm06.qbbldl (mgr.14229) 42 : audit [DBG] from='client.14274 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:29.787719+0000 mgr.vm06.qbbldl (mgr.14229) 42 : audit [DBG] from='client.14274 -' entity='client.admin' cmd=[{"prefix": "orch apply osd", "all_available_devices": true, "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: cephadm 2026-04-15T13:34:29.788858+0000 mgr.vm06.qbbldl (mgr.14229) 43 : cephadm [INF] Marking host: vm06 for OSDSpec preview refresh. 
2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: cephadm 2026-04-15T13:34:29.788858+0000 mgr.vm06.qbbldl (mgr.14229) 43 : cephadm [INF] Marking host: vm06 for OSDSpec preview refresh. 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: cephadm 2026-04-15T13:34:29.788881+0000 mgr.vm06.qbbldl (mgr.14229) 44 : cephadm [INF] Marking host: vm09 for OSDSpec preview refresh. 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: cephadm 2026-04-15T13:34:29.788881+0000 mgr.vm06.qbbldl (mgr.14229) 44 : cephadm [INF] Marking host: vm09 for OSDSpec preview refresh. 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: cephadm 2026-04-15T13:34:29.789042+0000 mgr.vm06.qbbldl (mgr.14229) 45 : cephadm [INF] Saving service osd.all-available-devices spec with placement * 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: cephadm 2026-04-15T13:34:29.789042+0000 mgr.vm06.qbbldl (mgr.14229) 45 : cephadm [INF] Saving service osd.all-available-devices spec with placement * 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: cephadm 2026-04-15T13:34:29.816077+0000 mgr.vm06.qbbldl (mgr.14229) 46 : cephadm [INF] Reconfiguring mgr.vm09.kpawde (monmap changed)... 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: cephadm 2026-04-15T13:34:29.816077+0000 mgr.vm06.qbbldl (mgr.14229) 46 : cephadm [INF] Reconfiguring mgr.vm09.kpawde (monmap changed)... 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: cephadm 2026-04-15T13:34:29.817749+0000 mgr.vm06.qbbldl (mgr.14229) 47 : cephadm [INF] Reconfiguring daemon mgr.vm09.kpawde on vm09 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: cephadm 2026-04-15T13:34:29.817749+0000 mgr.vm06.qbbldl (mgr.14229) 47 : cephadm [INF] Reconfiguring daemon mgr.vm09.kpawde on vm09 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: cluster 2026-04-15T13:34:30.166350+0000 mgr.vm06.qbbldl (mgr.14229) 48 : cluster [DBG] pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: cluster 2026-04-15T13:34:30.166350+0000 mgr.vm06.qbbldl (mgr.14229) 48 : cluster [DBG] pgmap v4: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:30.222189+0000 mon.vm06 (mon.0) 338 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:30.222189+0000 mon.vm06 (mon.0) 338 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:30.226956+0000 mon.vm06 (mon.0) 339 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:30.226956+0000 mon.vm06 (mon.0) 339 : audit [INF] from='mgr.14229 
192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: cephadm 2026-04-15T13:34:30.227473+0000 mgr.vm06.qbbldl (mgr.14229) 49 : cephadm [INF] Reconfiguring ceph-exporter.vm09 (monmap changed)... 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: cephadm 2026-04-15T13:34:30.227473+0000 mgr.vm06.qbbldl (mgr.14229) 49 : cephadm [INF] Reconfiguring ceph-exporter.vm09 (monmap changed)... 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:30.228828+0000 mon.vm06 (mon.0) 340 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:30.228828+0000 mon.vm06 (mon.0) 340 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.ceph-exporter.vm09", "caps": ["mon", "profile ceph-exporter", "mon", "allow r", "mgr", "allow r", "osd", "allow r"]} : dispatch 2026-04-15T13:34:31.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:30.230054+0000 mon.vm06 (mon.0) 341 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:30.230054+0000 mon.vm06 (mon.0) 341 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: cephadm 2026-04-15T13:34:30.230957+0000 mgr.vm06.qbbldl (mgr.14229) 50 : cephadm [INF] Reconfiguring daemon ceph-exporter.vm09 on vm09 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: cephadm 2026-04-15T13:34:30.230957+0000 mgr.vm06.qbbldl (mgr.14229) 50 : cephadm [INF] Reconfiguring daemon ceph-exporter.vm09 on vm09 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:30.522806+0000 mon.vm06 (mon.0) 342 : audit [DBG] from='client.? 192.168.123.106:0/4266630847' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:30.522806+0000 mon.vm06 (mon.0) 342 : audit [DBG] from='client.? 
192.168.123.106:0/4266630847' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:30.763958+0000 mon.vm06 (mon.0) 343 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:30.763958+0000 mon.vm06 (mon.0) 343 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:30.767524+0000 mon.vm06 (mon.0) 344 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:30.767524+0000 mon.vm06 (mon.0) 344 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:30.768246+0000 mon.vm06 (mon.0) 345 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "mon."} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:30.768246+0000 mon.vm06 (mon.0) 345 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "mon."} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:30.768748+0000 mon.vm06 (mon.0) 346 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "mon", "key": "public_network"} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:30.768748+0000 mon.vm06 (mon.0) 346 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "mon", "key": "public_network"} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:30.769190+0000 mon.vm06 (mon.0) 347 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:30.769190+0000 mon.vm06 (mon.0) 347 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.149182+0000 mon.vm06 (mon.0) 348 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.149182+0000 mon.vm06 (mon.0) 348 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 
2026-04-15T13:34:31.153820+0000 mon.vm06 (mon.0) 349 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.153820+0000 mon.vm06 (mon.0) 349 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.161447+0000 mon.vm06 (mon.0) 350 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.161447+0000 mon.vm06 (mon.0) 350 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.165263+0000 mon.vm06 (mon.0) 351 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.165263+0000 mon.vm06 (mon.0) 351 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.167867+0000 mon.vm06 (mon.0) 352 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard get-prometheus-api-host"} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.167867+0000 mon.vm06 (mon.0) 352 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard get-prometheus-api-host"} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.169075+0000 mon.vm06 (mon.0) 353 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.169075+0000 mon.vm06 (mon.0) 353 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.172374+0000 mon.vm06 (mon.0) 354 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.172374+0000 mon.vm06 (mon.0) 354 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.182332+0000 mon.vm06 (mon.0) 355 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.182332+0000 mon.vm06 (mon.0) 355 : audit 
[INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.185920+0000 mon.vm06 (mon.0) 356 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.185920+0000 mon.vm06 (mon.0) 356 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.187599+0000 mon.vm06 (mon.0) 357 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard get-grafana-api-url"} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.187599+0000 mon.vm06 (mon.0) 357 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard get-grafana-api-url"} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.188397+0000 mon.vm06 (mon.0) 358 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.188397+0000 mon.vm06 (mon.0) 358 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.191048+0000 mon.vm06 (mon.0) 359 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.191048+0000 mon.vm06 (mon.0) 359 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.198079+0000 mon.vm06 (mon.0) 360 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.198079+0000 mon.vm06 (mon.0) 360 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.201892+0000 mon.vm06 (mon.0) 361 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.201892+0000 mon.vm06 (mon.0) 361 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.203392+0000 mon.vm06 (mon.0) 362 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' 
entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard get-alertmanager-api-host"} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.203392+0000 mon.vm06 (mon.0) 362 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard get-alertmanager-api-host"} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.204085+0000 mon.vm06 (mon.0) 363 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm06.local:9093"} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.204085+0000 mon.vm06 (mon.0) 363 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm06.local:9093"} : dispatch 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.206638+0000 mon.vm06 (mon.0) 364 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:31 vm06 bash[28114]: audit 2026-04-15T13:34:31.206638+0000 mon.vm06 (mon.0) 364 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:31.623 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd stat -f json 2026-04-15T13:34:31.908 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:34:32.300 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:34:32.311 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: cephadm 2026-04-15T13:34:30.768073+0000 mgr.vm06.qbbldl (mgr.14229) 51 : cephadm [INF] Reconfiguring mon.vm09 (monmap changed)... 2026-04-15T13:34:32.311 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: cephadm 2026-04-15T13:34:30.768073+0000 mgr.vm06.qbbldl (mgr.14229) 51 : cephadm [INF] Reconfiguring mon.vm09 (monmap changed)... 2026-04-15T13:34:32.311 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: cephadm 2026-04-15T13:34:30.769832+0000 mgr.vm06.qbbldl (mgr.14229) 52 : cephadm [INF] Reconfiguring daemon mon.vm09 on vm09 2026-04-15T13:34:32.311 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: cephadm 2026-04-15T13:34:30.769832+0000 mgr.vm06.qbbldl (mgr.14229) 52 : cephadm [INF] Reconfiguring daemon mon.vm09 on vm09 2026-04-15T13:34:32.311 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: audit 2026-04-15T13:34:31.168238+0000 mgr.vm06.qbbldl (mgr.14229) 53 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-04-15T13:34:32.311 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: audit 2026-04-15T13:34:31.168238+0000 mgr.vm06.qbbldl (mgr.14229) 53 : audit [DBG] from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-04-15T13:34:32.311 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: audit 2026-04-15T13:34:31.169286+0000 mgr.vm06.qbbldl (mgr.14229) 54 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch 2026-04-15T13:34:32.311 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: audit 2026-04-15T13:34:31.169286+0000 mgr.vm06.qbbldl (mgr.14229) 54 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch 2026-04-15T13:34:32.311 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: audit 2026-04-15T13:34:31.187836+0000 mgr.vm06.qbbldl (mgr.14229) 55 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-04-15T13:34:32.311 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: audit 2026-04-15T13:34:31.187836+0000 mgr.vm06.qbbldl (mgr.14229) 55 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-04-15T13:34:32.311 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: audit 2026-04-15T13:34:31.188571+0000 mgr.vm06.qbbldl (mgr.14229) 56 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"}]: dispatch 2026-04-15T13:34:32.311 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: audit 2026-04-15T13:34:31.188571+0000 mgr.vm06.qbbldl (mgr.14229) 56 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"}]: dispatch 2026-04-15T13:34:32.312 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: audit 2026-04-15T13:34:31.203582+0000 mgr.vm06.qbbldl (mgr.14229) 57 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-04-15T13:34:32.312 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: audit 2026-04-15T13:34:31.203582+0000 mgr.vm06.qbbldl (mgr.14229) 57 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-04-15T13:34:32.312 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: audit 2026-04-15T13:34:31.204251+0000 mgr.vm06.qbbldl (mgr.14229) 58 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm06.local:9093"}]: dispatch 2026-04-15T13:34:32.312 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: audit 2026-04-15T13:34:31.204251+0000 mgr.vm06.qbbldl (mgr.14229) 58 : audit [DBG] from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm06.local:9093"}]: dispatch 2026-04-15T13:34:32.312 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: audit 2026-04-15T13:34:31.249475+0000 mon.vm06 (mon.0) 365 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:34:32.312 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: audit 2026-04-15T13:34:31.249475+0000 mon.vm06 (mon.0) 365 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:34:32.312 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: audit 2026-04-15T13:34:31.249918+0000 mgr.vm06.qbbldl (mgr.14229) 59 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T13:34:32.312 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: audit 2026-04-15T13:34:31.249918+0000 mgr.vm06.qbbldl (mgr.14229) 59 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T13:34:32.312 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: audit 2026-04-15T13:34:31.424428+0000 mon.vm06 (mon.0) 366 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:34:32.312 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: audit 2026-04-15T13:34:31.424428+0000 mon.vm06 (mon.0) 366 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:34:32.312 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: audit 2026-04-15T13:34:31.757959+0000 mon.vm06 (mon.0) 367 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:34:32.312 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:32 vm06 bash[28114]: audit 2026-04-15T13:34:31.757959+0000 mon.vm06 (mon.0) 367 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:34:32.379 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: cephadm 2026-04-15T13:34:30.768073+0000 mgr.vm06.qbbldl (mgr.14229) 51 : cephadm [INF] Reconfiguring mon.vm09 (monmap changed)... 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: cephadm 2026-04-15T13:34:30.768073+0000 mgr.vm06.qbbldl (mgr.14229) 51 : cephadm [INF] Reconfiguring mon.vm09 (monmap changed)... 
2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: cephadm 2026-04-15T13:34:30.769832+0000 mgr.vm06.qbbldl (mgr.14229) 52 : cephadm [INF] Reconfiguring daemon mon.vm09 on vm09 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: cephadm 2026-04-15T13:34:30.769832+0000 mgr.vm06.qbbldl (mgr.14229) 52 : cephadm [INF] Reconfiguring daemon mon.vm09 on vm09 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: audit 2026-04-15T13:34:31.168238+0000 mgr.vm06.qbbldl (mgr.14229) 53 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: audit 2026-04-15T13:34:31.168238+0000 mgr.vm06.qbbldl (mgr.14229) 53 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: audit 2026-04-15T13:34:31.169286+0000 mgr.vm06.qbbldl (mgr.14229) 54 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: audit 2026-04-15T13:34:31.169286+0000 mgr.vm06.qbbldl (mgr.14229) 54 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-prometheus-api-host", "value": "http://vm06.local:9095"}]: dispatch 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: audit 2026-04-15T13:34:31.187836+0000 mgr.vm06.qbbldl (mgr.14229) 55 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: audit 2026-04-15T13:34:31.187836+0000 mgr.vm06.qbbldl (mgr.14229) 55 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-grafana-api-url"}]: dispatch 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: audit 2026-04-15T13:34:31.188571+0000 mgr.vm06.qbbldl (mgr.14229) 56 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"}]: dispatch 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: audit 2026-04-15T13:34:31.188571+0000 mgr.vm06.qbbldl (mgr.14229) 56 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-grafana-api-url", "value": "https://vm06.local:3000"}]: dispatch 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: audit 2026-04-15T13:34:31.203582+0000 mgr.vm06.qbbldl (mgr.14229) 57 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: audit 2026-04-15T13:34:31.203582+0000 mgr.vm06.qbbldl (mgr.14229) 57 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-alertmanager-api-host"}]: dispatch 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: audit 2026-04-15T13:34:31.204251+0000 mgr.vm06.qbbldl (mgr.14229) 58 : audit [DBG] from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm06.local:9093"}]: dispatch 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: audit 2026-04-15T13:34:31.204251+0000 mgr.vm06.qbbldl (mgr.14229) 58 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard set-alertmanager-api-host", "value": "http://vm06.local:9093"}]: dispatch 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: audit 2026-04-15T13:34:31.249475+0000 mon.vm06 (mon.0) 365 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: audit 2026-04-15T13:34:31.249475+0000 mon.vm06 (mon.0) 365 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: audit 2026-04-15T13:34:31.249918+0000 mgr.vm06.qbbldl (mgr.14229) 59 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: audit 2026-04-15T13:34:31.249918+0000 mgr.vm06.qbbldl (mgr.14229) 59 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: audit 2026-04-15T13:34:31.424428+0000 mon.vm06 (mon.0) 366 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: audit 2026-04-15T13:34:31.424428+0000 mon.vm06 (mon.0) 366 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: audit 2026-04-15T13:34:31.757959+0000 mon.vm06 (mon.0) 367 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:34:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:32 vm09 bash[34466]: audit 2026-04-15T13:34:31.757959+0000 mon.vm06 (mon.0) 367 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:34:33.379 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd stat -f json 2026-04-15T13:34:33.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:33 vm06 bash[28114]: cephadm 2026-04-15T13:34:31.423872+0000 mgr.vm06.qbbldl (mgr.14229) 60 : cephadm [INF] Certificate for "grafana_cert (vm06)" is still valid for 1094 days. 
2026-04-15T13:34:33.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:33 vm06 bash[28114]: cephadm 2026-04-15T13:34:31.423872+0000 mgr.vm06.qbbldl (mgr.14229) 60 : cephadm [INF] Certificate for "grafana_cert (vm06)" is still valid for 1094 days. 2026-04-15T13:34:33.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:33 vm06 bash[28114]: audit 2026-04-15T13:34:31.758477+0000 mgr.vm06.qbbldl (mgr.14229) 61 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T13:34:33.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:33 vm06 bash[28114]: audit 2026-04-15T13:34:31.758477+0000 mgr.vm06.qbbldl (mgr.14229) 61 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T13:34:33.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:33 vm06 bash[28114]: cluster 2026-04-15T13:34:32.166528+0000 mgr.vm06.qbbldl (mgr.14229) 62 : cluster [DBG] pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:33.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:33 vm06 bash[28114]: cluster 2026-04-15T13:34:32.166528+0000 mgr.vm06.qbbldl (mgr.14229) 62 : cluster [DBG] pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:33.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:33 vm06 bash[28114]: audit 2026-04-15T13:34:32.301334+0000 mon.vm06 (mon.0) 368 : audit [DBG] from='client.? 192.168.123.106:0/2567695332' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:33.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:33 vm06 bash[28114]: audit 2026-04-15T13:34:32.301334+0000 mon.vm06 (mon.0) 368 : audit [DBG] from='client.? 192.168.123.106:0/2567695332' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:33.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:33 vm06 bash[28114]: audit 2026-04-15T13:34:32.366091+0000 mon.vm06 (mon.0) 369 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:34:33.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:33 vm06 bash[28114]: audit 2026-04-15T13:34:32.366091+0000 mon.vm06 (mon.0) 369 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:34:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:33 vm09 bash[34466]: cephadm 2026-04-15T13:34:31.423872+0000 mgr.vm06.qbbldl (mgr.14229) 60 : cephadm [INF] Certificate for "grafana_cert (vm06)" is still valid for 1094 days. 2026-04-15T13:34:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:33 vm09 bash[34466]: cephadm 2026-04-15T13:34:31.423872+0000 mgr.vm06.qbbldl (mgr.14229) 60 : cephadm [INF] Certificate for "grafana_cert (vm06)" is still valid for 1094 days. 2026-04-15T13:34:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:33 vm09 bash[34466]: audit 2026-04-15T13:34:31.758477+0000 mgr.vm06.qbbldl (mgr.14229) 61 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T13:34:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:33 vm09 bash[34466]: audit 2026-04-15T13:34:31.758477+0000 mgr.vm06.qbbldl (mgr.14229) 61 : audit [DBG] from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T13:34:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:33 vm09 bash[34466]: cluster 2026-04-15T13:34:32.166528+0000 mgr.vm06.qbbldl (mgr.14229) 62 : cluster [DBG] pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:33 vm09 bash[34466]: cluster 2026-04-15T13:34:32.166528+0000 mgr.vm06.qbbldl (mgr.14229) 62 : cluster [DBG] pgmap v5: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:33 vm09 bash[34466]: audit 2026-04-15T13:34:32.301334+0000 mon.vm06 (mon.0) 368 : audit [DBG] from='client.? 192.168.123.106:0/2567695332' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:33 vm09 bash[34466]: audit 2026-04-15T13:34:32.301334+0000 mon.vm06 (mon.0) 368 : audit [DBG] from='client.? 192.168.123.106:0/2567695332' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:33 vm09 bash[34466]: audit 2026-04-15T13:34:32.366091+0000 mon.vm06 (mon.0) 369 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:34:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:33 vm09 bash[34466]: audit 2026-04-15T13:34:32.366091+0000 mon.vm06 (mon.0) 369 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:34:33.639 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:34:34.012 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:34:34.084 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-04-15T13:34:34.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:34 vm06 bash[28114]: audit 2026-04-15T13:34:32.369236+0000 mgr.vm06.qbbldl (mgr.14229) 63 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T13:34:34.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:34 vm06 bash[28114]: audit 2026-04-15T13:34:32.369236+0000 mgr.vm06.qbbldl (mgr.14229) 63 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T13:34:34.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:34 vm06 bash[28114]: audit 2026-04-15T13:34:34.012969+0000 mon.vm06 (mon.0) 370 : audit [DBG] from='client.? 192.168.123.106:0/130638195' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:34.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:34 vm06 bash[28114]: audit 2026-04-15T13:34:34.012969+0000 mon.vm06 (mon.0) 370 : audit [DBG] from='client.? 192.168.123.106:0/130638195' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:34.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:34 vm09 bash[34466]: audit 2026-04-15T13:34:32.369236+0000 mgr.vm06.qbbldl (mgr.14229) 63 : audit [DBG] from='mon.0 -' entity='mon.' 
cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T13:34:34.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:34 vm09 bash[34466]: audit 2026-04-15T13:34:32.369236+0000 mgr.vm06.qbbldl (mgr.14229) 63 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch 2026-04-15T13:34:34.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:34 vm09 bash[34466]: audit 2026-04-15T13:34:34.012969+0000 mon.vm06 (mon.0) 370 : audit [DBG] from='client.? 192.168.123.106:0/130638195' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:34.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:34 vm09 bash[34466]: audit 2026-04-15T13:34:34.012969+0000 mon.vm06 (mon.0) 370 : audit [DBG] from='client.? 192.168.123.106:0/130638195' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:35.085 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd stat -f json 2026-04-15T13:34:35.346 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:34:35.365 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:35 vm06 bash[28114]: cluster 2026-04-15T13:34:34.166704+0000 mgr.vm06.qbbldl (mgr.14229) 64 : cluster [DBG] pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:35.365 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:35 vm06 bash[28114]: cluster 2026-04-15T13:34:34.166704+0000 mgr.vm06.qbbldl (mgr.14229) 64 : cluster [DBG] pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:35.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:35 vm09 bash[34466]: cluster 2026-04-15T13:34:34.166704+0000 mgr.vm06.qbbldl (mgr.14229) 64 : cluster [DBG] pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:35.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:35 vm09 bash[34466]: cluster 2026-04-15T13:34:34.166704+0000 mgr.vm06.qbbldl (mgr.14229) 64 : cluster [DBG] pgmap v6: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:35.708 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:34:35.768 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-04-15T13:34:36.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:36 vm06 bash[28114]: audit 2026-04-15T13:34:35.708895+0000 mon.vm06 (mon.0) 371 : audit [DBG] from='client.? 192.168.123.106:0/2966928412' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:36.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:36 vm06 bash[28114]: audit 2026-04-15T13:34:35.708895+0000 mon.vm06 (mon.0) 371 : audit [DBG] from='client.? 
192.168.123.106:0/2966928412' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:36.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:36 vm06 bash[28114]: audit 2026-04-15T13:34:35.974599+0000 mon.vm06 (mon.0) 372 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:36.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:36 vm06 bash[28114]: audit 2026-04-15T13:34:35.974599+0000 mon.vm06 (mon.0) 372 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:36.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:36 vm06 bash[28114]: audit 2026-04-15T13:34:35.978670+0000 mon.vm06 (mon.0) 373 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:36.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:36 vm06 bash[28114]: audit 2026-04-15T13:34:35.978670+0000 mon.vm06 (mon.0) 373 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:36.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:36 vm06 bash[28114]: audit 2026-04-15T13:34:35.983031+0000 mon.vm06 (mon.0) 374 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:36.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:36 vm06 bash[28114]: audit 2026-04-15T13:34:35.983031+0000 mon.vm06 (mon.0) 374 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:36.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:36 vm06 bash[28114]: audit 2026-04-15T13:34:35.986739+0000 mon.vm06 (mon.0) 375 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:36.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:36 vm06 bash[28114]: audit 2026-04-15T13:34:35.986739+0000 mon.vm06 (mon.0) 375 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:36.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:36 vm09 bash[34466]: audit 2026-04-15T13:34:35.708895+0000 mon.vm06 (mon.0) 371 : audit [DBG] from='client.? 192.168.123.106:0/2966928412' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:36.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:36 vm09 bash[34466]: audit 2026-04-15T13:34:35.708895+0000 mon.vm06 (mon.0) 371 : audit [DBG] from='client.? 
192.168.123.106:0/2966928412' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:36.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:36 vm09 bash[34466]: audit 2026-04-15T13:34:35.974599+0000 mon.vm06 (mon.0) 372 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:36.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:36 vm09 bash[34466]: audit 2026-04-15T13:34:35.974599+0000 mon.vm06 (mon.0) 372 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:36.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:36 vm09 bash[34466]: audit 2026-04-15T13:34:35.978670+0000 mon.vm06 (mon.0) 373 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:36.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:36 vm09 bash[34466]: audit 2026-04-15T13:34:35.978670+0000 mon.vm06 (mon.0) 373 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:36.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:36 vm09 bash[34466]: audit 2026-04-15T13:34:35.983031+0000 mon.vm06 (mon.0) 374 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:36.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:36 vm09 bash[34466]: audit 2026-04-15T13:34:35.983031+0000 mon.vm06 (mon.0) 374 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:36.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:36 vm09 bash[34466]: audit 2026-04-15T13:34:35.986739+0000 mon.vm06 (mon.0) 375 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:36.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:36 vm09 bash[34466]: audit 2026-04-15T13:34:35.986739+0000 mon.vm06 (mon.0) 375 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:36.769 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd stat -f json 2026-04-15T13:34:37.050 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:34:37.405 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:34:37.416 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: cluster 2026-04-15T13:34:36.166864+0000 mgr.vm06.qbbldl (mgr.14229) 65 : cluster [DBG] pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:37.416 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: cluster 2026-04-15T13:34:36.166864+0000 mgr.vm06.qbbldl (mgr.14229) 65 : cluster [DBG] pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:37.416 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.469129+0000 mon.vm06 (mon.0) 376 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:37.416 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.469129+0000 mon.vm06 (mon.0) 376 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' 
entity='mgr.vm06.qbbldl' 2026-04-15T13:34:37.416 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.473019+0000 mon.vm06 (mon.0) 377 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:37.417 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.473019+0000 mon.vm06 (mon.0) 377 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:37.417 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.476642+0000 mon.vm06 (mon.0) 378 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:37.417 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.476642+0000 mon.vm06 (mon.0) 378 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:37.417 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.480944+0000 mon.vm06 (mon.0) 379 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:37.417 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.480944+0000 mon.vm06 (mon.0) 379 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:37.417 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.481655+0000 mon.vm06 (mon.0) 380 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:37.417 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.481655+0000 mon.vm06 (mon.0) 380 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:37.417 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.482148+0000 mon.vm06 (mon.0) 381 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:34:37.417 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.482148+0000 mon.vm06 (mon.0) 381 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:34:37.417 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.485097+0000 mon.vm06 (mon.0) 382 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:37.417 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.485097+0000 mon.vm06 (mon.0) 382 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:37.417 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.486468+0000 mon.vm06 (mon.0) 383 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:34:37.417 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.486468+0000 mon.vm06 (mon.0) 383 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:34:37.417 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.488149+0000 mon.vm06 (mon.0) 384 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch 2026-04-15T13:34:37.417 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.488149+0000 mon.vm06 (mon.0) 384 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch 2026-04-15T13:34:37.417 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.488543+0000 mon.vm06 (mon.0) 385 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:37.417 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.488543+0000 mon.vm06 (mon.0) 385 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:37.417 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.489739+0000 mon.vm06 (mon.0) 386 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch 2026-04-15T13:34:37.417 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.489739+0000 mon.vm06 (mon.0) 386 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch 2026-04-15T13:34:37.417 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.490067+0000 mon.vm06 (mon.0) 387 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:37.417 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:37 vm06 bash[28114]: audit 2026-04-15T13:34:36.490067+0000 mon.vm06 (mon.0) 387 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:37.480 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":5,"num_osds":0,"num_up_osds":0,"osd_up_since":0,"num_in_osds":0,"osd_in_since":0,"num_remapped_pgs":0} 2026-04-15T13:34:37.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: cluster 2026-04-15T13:34:36.166864+0000 mgr.vm06.qbbldl (mgr.14229) 65 : cluster [DBG] pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:37.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: cluster 2026-04-15T13:34:36.166864+0000 mgr.vm06.qbbldl (mgr.14229) 65 : cluster [DBG] pgmap v7: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:37.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 
2026-04-15T13:34:36.469129+0000 mon.vm06 (mon.0) 376 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:37.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.469129+0000 mon.vm06 (mon.0) 376 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:37.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.473019+0000 mon.vm06 (mon.0) 377 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:37.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.473019+0000 mon.vm06 (mon.0) 377 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:37.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.476642+0000 mon.vm06 (mon.0) 378 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:37.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.476642+0000 mon.vm06 (mon.0) 378 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:37.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.480944+0000 mon.vm06 (mon.0) 379 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:37.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.480944+0000 mon.vm06 (mon.0) 379 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:37.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.481655+0000 mon.vm06 (mon.0) 380 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:37.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.481655+0000 mon.vm06 (mon.0) 380 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:37.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.482148+0000 mon.vm06 (mon.0) 381 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:34:37.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.482148+0000 mon.vm06 (mon.0) 381 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:34:37.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.485097+0000 mon.vm06 (mon.0) 382 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:34:37.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.485097+0000 mon.vm06 (mon.0) 382 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 
2026-04-15T13:34:37.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.486468+0000 mon.vm06 (mon.0) 383 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:34:37.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.486468+0000 mon.vm06 (mon.0) 383 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:34:37.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.488149+0000 mon.vm06 (mon.0) 384 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch 2026-04-15T13:34:37.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.488149+0000 mon.vm06 (mon.0) 384 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch 2026-04-15T13:34:37.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.488543+0000 mon.vm06 (mon.0) 385 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:37.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.488543+0000 mon.vm06 (mon.0) 385 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:37.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.489739+0000 mon.vm06 (mon.0) 386 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch 2026-04-15T13:34:37.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.489739+0000 mon.vm06 (mon.0) 386 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.bootstrap-osd"} : dispatch 2026-04-15T13:34:37.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.490067+0000 mon.vm06 (mon.0) 387 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:37.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:37 vm09 bash[34466]: audit 2026-04-15T13:34:36.490067+0000 mon.vm06 (mon.0) 387 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:34:38.481 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd stat -f json 2026-04-15T13:34:38.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:38 vm06 bash[28114]: audit 
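The shell invocations above are the harness polling the cluster: each run of `ceph osd stat -f json` inside a cephadm shell reports how many OSDs exist so far (num_osds stays 0 until the first devices are prepared). A minimal sketch of an equivalent wait loop, assuming the same cephadm binary, container image, and fsid as in this run; the expected count of 8 (four nvme-loop volumes on each of the two hosts), the sleep interval, and the helper itself are assumptions, not teuthology's actual code:

    # Poll `ceph osd stat -f json` through a cephadm shell until all
    # expected OSDs have been created (hypothetical helper).
    FSID=75e42418-38cf-11f1-9300-4fe77ac4445b
    IMAGE=harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5
    EXPECTED=8   # assumption: 4 loop devices per host x 2 hosts
    until n=$(sudo /home/ubuntu/cephtest/cephadm --image "$IMAGE" shell --fsid "$FSID" -- \
              ceph osd stat -f json | python3 -c 'import json,sys; print(json.load(sys.stdin)["num_osds"])') \
          && [ "$n" -ge "$EXPECTED" ]; do
        sleep 2
    done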
2026-04-15T13:34:38.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:38 vm06 bash[28114]: audit 2026-04-15T13:34:37.405942+0000 mon.vm06 (mon.0) 388 : audit [DBG] from='client.? 192.168.123.106:0/3789700543' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-15T13:34:38.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:38 vm06 bash[28114]: audit 2026-04-15T13:34:38.197970+0000 mon.vm09 (mon.1) 2 : audit [INF] from='client.? 192.168.123.109:0/3632659984' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "84143048-0065-4d27-be49-d79e8113f54d"} : dispatch
2026-04-15T13:34:38.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:38 vm06 bash[28114]: audit 2026-04-15T13:34:38.200275+0000 mon.vm06 (mon.0) 389 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "84143048-0065-4d27-be49-d79e8113f54d"} : dispatch
2026-04-15T13:34:38.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:38 vm06 bash[28114]: audit 2026-04-15T13:34:38.203498+0000 mon.vm06 (mon.0) 390 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "84143048-0065-4d27-be49-d79e8113f54d"}]': finished
2026-04-15T13:34:38.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:38 vm06 bash[28114]: cluster 2026-04-15T13:34:38.205727+0000 mon.vm06 (mon.0) 391 : cluster [DBG] osdmap e6: 1 total, 0 up, 1 in
2026-04-15T13:34:38.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:38 vm06 bash[28114]: audit 2026-04-15T13:34:38.205854+0000 mon.vm06 (mon.0) 392 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-15T13:34:38.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:38 vm09 bash[34466]: audit 2026-04-15T13:34:37.405942+0000 mon.vm06 (mon.0) 388 : audit [DBG] from='client.? 192.168.123.106:0/3789700543' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-15T13:34:38.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:38 vm09 bash[34466]: audit 2026-04-15T13:34:38.197970+0000 mon.vm09 (mon.1) 2 : audit [INF] from='client.? 192.168.123.109:0/3632659984' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "84143048-0065-4d27-be49-d79e8113f54d"} : dispatch
2026-04-15T13:34:38.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:38 vm09 bash[34466]: audit 2026-04-15T13:34:38.200275+0000 mon.vm06 (mon.0) 389 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "84143048-0065-4d27-be49-d79e8113f54d"} : dispatch
2026-04-15T13:34:38.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:38 vm09 bash[34466]: audit 2026-04-15T13:34:38.203498+0000 mon.vm06 (mon.0) 390 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "84143048-0065-4d27-be49-d79e8113f54d"}]': finished
2026-04-15T13:34:38.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:38 vm09 bash[34466]: cluster 2026-04-15T13:34:38.205727+0000 mon.vm06 (mon.0) 391 : cluster [DBG] osdmap e6: 1 total, 0 up, 1 in
2026-04-15T13:34:38.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:38 vm09 bash[34466]: audit 2026-04-15T13:34:38.205854+0000 mon.vm06 (mon.0) 392 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-15T13:34:38.746 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:34:39.114 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:34:39.189 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":7,"num_osds":2,"num_up_osds":0,"osd_up_since":0,"num_in_osds":2,"osd_in_since":1776260078,"num_remapped_pgs":0}
2026-04-15T13:34:39.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:39 vm06 bash[28114]: cluster 2026-04-15T13:34:38.167058+0000 mgr.vm06.qbbldl (mgr.14229) 66 : cluster [DBG] pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:34:39.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:39 vm06 bash[28114]: audit 2026-04-15T13:34:38.333279+0000 mon.vm06 (mon.0) 393 : audit [INF] from='client.? 192.168.123.106:0/4068196155' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "ef108fe4-72aa-4087-a097-576f30c554c6"} : dispatch
2026-04-15T13:34:39.484 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:39 vm06 bash[28114]: audit 2026-04-15T13:34:38.335571+0000 mon.vm06 (mon.0) 394 : audit [INF] from='client.? 192.168.123.106:0/4068196155' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "ef108fe4-72aa-4087-a097-576f30c554c6"}]': finished
2026-04-15T13:34:39.485 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:39 vm06 bash[28114]: cluster 2026-04-15T13:34:38.337327+0000 mon.vm06 (mon.0) 395 : cluster [DBG] osdmap e7: 2 total, 0 up, 2 in
2026-04-15T13:34:39.485 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:39 vm06 bash[28114]: audit 2026-04-15T13:34:38.337428+0000 mon.vm06 (mon.0) 396 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-15T13:34:39.485 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:39 vm06 bash[28114]: audit 2026-04-15T13:34:38.337526+0000 mon.vm06 (mon.0) 397 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-15T13:34:39.485 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:39 vm06 bash[28114]: audit 2026-04-15T13:34:38.480282+0000 mon.vm06 (mon.0) 398 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:34:39.485 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:39 vm06 bash[28114]: audit 2026-04-15T13:34:39.013841+0000 mon.vm09 (mon.1) 3 : audit [DBG] from='client.? 192.168.123.109:0/641862995' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch
2026-04-15T13:34:39.485 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:39 vm06 bash[28114]: audit 2026-04-15T13:34:39.110880+0000 mon.vm06 (mon.0) 399 : audit [DBG] from='client.? 192.168.123.106:0/3229930483' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch
2026-04-15T13:34:39.485 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:39 vm06 bash[28114]: audit 2026-04-15T13:34:39.115662+0000 mon.vm06 (mon.0) 400 : audit [DBG] from='client.? 192.168.123.106:0/1469464189' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-15T13:34:39.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:39 vm09 bash[34466]: cluster 2026-04-15T13:34:38.167058+0000 mgr.vm06.qbbldl (mgr.14229) 66 : cluster [DBG] pgmap v8: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:34:39.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:39 vm09 bash[34466]: audit 2026-04-15T13:34:38.333279+0000 mon.vm06 (mon.0) 393 : audit [INF] from='client.? 192.168.123.106:0/4068196155' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "ef108fe4-72aa-4087-a097-576f30c554c6"} : dispatch
2026-04-15T13:34:39.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:39 vm09 bash[34466]: audit 2026-04-15T13:34:38.335571+0000 mon.vm06 (mon.0) 394 : audit [INF] from='client.? 192.168.123.106:0/4068196155' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "ef108fe4-72aa-4087-a097-576f30c554c6"}]': finished
2026-04-15T13:34:39.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:39 vm09 bash[34466]: cluster 2026-04-15T13:34:38.337327+0000 mon.vm06 (mon.0) 395 : cluster [DBG] osdmap e7: 2 total, 0 up, 2 in
2026-04-15T13:34:39.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:39 vm09 bash[34466]: audit 2026-04-15T13:34:38.337428+0000 mon.vm06 (mon.0) 396 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-15T13:34:39.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:39 vm09 bash[34466]: audit 2026-04-15T13:34:38.337526+0000 mon.vm06 (mon.0) 397 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-15T13:34:39.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:39 vm09 bash[34466]: audit 2026-04-15T13:34:38.480282+0000 mon.vm06 (mon.0) 398 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:34:39.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:39 vm09 bash[34466]: audit 2026-04-15T13:34:39.013841+0000 mon.vm09 (mon.1) 3 : audit [DBG] from='client.? 192.168.123.109:0/641862995' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch
2026-04-15T13:34:39.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:39 vm09 bash[34466]: audit 2026-04-15T13:34:39.110880+0000 mon.vm06 (mon.0) 399 : audit [DBG] from='client.? 192.168.123.106:0/3229930483' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch
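Each `osd new` audit pair above (a "dispatch" followed by a "finished") is ceph-volume, authenticated as client.bootstrap-osd, registering one prepared device with the monitors; the monitors allocate the next free OSD id, and the osdmap epoch ticks up with each registration (e6: 1 total, then e7: 2 total). A minimal manual equivalent, assuming admin credentials on a cluster node; the uuidgen value is illustrative, since the UUIDs in this run were generated by ceph-volume:

    # Register one new OSD by UUID; the monitors allocate and print its id.
    # This is what shows up in the audit log as cmd={"prefix": "osd new", "uuid": ...}.
    UUID=$(uuidgen)
    OSD_ID=$(ceph osd new "$UUID")
    echo "monitors allocated osd.${OSD_ID} for ${UUID}"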
192.168.123.106:0/3229930483' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T13:34:39.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:39 vm09 bash[34466]: audit 2026-04-15T13:34:39.115662+0000 mon.vm06 (mon.0) 400 : audit [DBG] from='client.? 192.168.123.106:0/1469464189' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:39.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:39 vm09 bash[34466]: audit 2026-04-15T13:34:39.115662+0000 mon.vm06 (mon.0) 400 : audit [DBG] from='client.? 192.168.123.106:0/1469464189' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:40.190 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd stat -f json 2026-04-15T13:34:40.458 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:34:40.838 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:34:40.903 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":9,"num_osds":4,"num_up_osds":0,"osd_up_since":0,"num_in_osds":4,"osd_in_since":1776260080,"num_remapped_pgs":0} 2026-04-15T13:34:41.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: cluster 2026-04-15T13:34:40.167227+0000 mgr.vm06.qbbldl (mgr.14229) 67 : cluster [DBG] pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:41.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: cluster 2026-04-15T13:34:40.167227+0000 mgr.vm06.qbbldl (mgr.14229) 67 : cluster [DBG] pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:41.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.274084+0000 mon.vm09 (mon.1) 4 : audit [INF] from='client.? 192.168.123.109:0/1690966141' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "549138af-e58f-4394-97f4-7a8c79a44f47"} : dispatch 2026-04-15T13:34:41.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.274084+0000 mon.vm09 (mon.1) 4 : audit [INF] from='client.? 192.168.123.109:0/1690966141' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "549138af-e58f-4394-97f4-7a8c79a44f47"} : dispatch 2026-04-15T13:34:41.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.276198+0000 mon.vm06 (mon.0) 401 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "549138af-e58f-4394-97f4-7a8c79a44f47"} : dispatch 2026-04-15T13:34:41.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.276198+0000 mon.vm06 (mon.0) 401 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "549138af-e58f-4394-97f4-7a8c79a44f47"} : dispatch 2026-04-15T13:34:41.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.278944+0000 mon.vm06 (mon.0) 402 : audit [INF] from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "549138af-e58f-4394-97f4-7a8c79a44f47"}]': finished 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.278944+0000 mon.vm06 (mon.0) 402 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "549138af-e58f-4394-97f4-7a8c79a44f47"}]': finished 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: cluster 2026-04-15T13:34:40.280259+0000 mon.vm06 (mon.0) 403 : cluster [DBG] osdmap e8: 3 total, 0 up, 3 in 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: cluster 2026-04-15T13:34:40.280259+0000 mon.vm06 (mon.0) 403 : cluster [DBG] osdmap e8: 3 total, 0 up, 3 in 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.280363+0000 mon.vm06 (mon.0) 404 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.280363+0000 mon.vm06 (mon.0) 404 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.280427+0000 mon.vm06 (mon.0) 405 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.280427+0000 mon.vm06 (mon.0) 405 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.280481+0000 mon.vm06 (mon.0) 406 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.280481+0000 mon.vm06 (mon.0) 406 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.316752+0000 mon.vm06 (mon.0) 407 : audit [INF] from='client.? 192.168.123.106:0/1313010770' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "dd7c0f1b-de8e-46ef-adf1-1743157ee826"} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.316752+0000 mon.vm06 (mon.0) 407 : audit [INF] from='client.? 192.168.123.106:0/1313010770' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "dd7c0f1b-de8e-46ef-adf1-1743157ee826"} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.319098+0000 mon.vm06 (mon.0) 408 : audit [INF] from='client.? 
192.168.123.106:0/1313010770' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "dd7c0f1b-de8e-46ef-adf1-1743157ee826"}]': finished 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.319098+0000 mon.vm06 (mon.0) 408 : audit [INF] from='client.? 192.168.123.106:0/1313010770' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "dd7c0f1b-de8e-46ef-adf1-1743157ee826"}]': finished 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: cluster 2026-04-15T13:34:40.320438+0000 mon.vm06 (mon.0) 409 : cluster [DBG] osdmap e9: 4 total, 0 up, 4 in 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: cluster 2026-04-15T13:34:40.320438+0000 mon.vm06 (mon.0) 409 : cluster [DBG] osdmap e9: 4 total, 0 up, 4 in 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.320524+0000 mon.vm06 (mon.0) 410 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.320524+0000 mon.vm06 (mon.0) 410 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.320593+0000 mon.vm06 (mon.0) 411 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.320593+0000 mon.vm06 (mon.0) 411 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.320651+0000 mon.vm06 (mon.0) 412 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.320651+0000 mon.vm06 (mon.0) 412 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.320715+0000 mon.vm06 (mon.0) 413 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.320715+0000 mon.vm06 (mon.0) 413 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.839078+0000 mon.vm06 (mon.0) 414 : audit [DBG] from='client.? 
192.168.123.106:0/1747505420' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.839078+0000 mon.vm06 (mon.0) 414 : audit [DBG] from='client.? 192.168.123.106:0/1747505420' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.997669+0000 mon.vm09 (mon.1) 5 : audit [DBG] from='client.? 192.168.123.109:0/2331672038' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:40.997669+0000 mon.vm09 (mon.1) 5 : audit [DBG] from='client.? 192.168.123.109:0/2331672038' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:41.081315+0000 mon.vm06 (mon.0) 415 : audit [DBG] from='client.? 192.168.123.106:0/386409973' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T13:34:41.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:41 vm09 bash[34466]: audit 2026-04-15T13:34:41.081315+0000 mon.vm06 (mon.0) 415 : audit [DBG] from='client.? 192.168.123.106:0/386409973' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: cluster 2026-04-15T13:34:40.167227+0000 mgr.vm06.qbbldl (mgr.14229) 67 : cluster [DBG] pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: cluster 2026-04-15T13:34:40.167227+0000 mgr.vm06.qbbldl (mgr.14229) 67 : cluster [DBG] pgmap v11: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.274084+0000 mon.vm09 (mon.1) 4 : audit [INF] from='client.? 192.168.123.109:0/1690966141' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "549138af-e58f-4394-97f4-7a8c79a44f47"} : dispatch 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.274084+0000 mon.vm09 (mon.1) 4 : audit [INF] from='client.? 192.168.123.109:0/1690966141' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "549138af-e58f-4394-97f4-7a8c79a44f47"} : dispatch 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.276198+0000 mon.vm06 (mon.0) 401 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "549138af-e58f-4394-97f4-7a8c79a44f47"} : dispatch 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.276198+0000 mon.vm06 (mon.0) 401 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "549138af-e58f-4394-97f4-7a8c79a44f47"} : dispatch 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.278944+0000 mon.vm06 (mon.0) 402 : audit [INF] from='client.? 
' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "549138af-e58f-4394-97f4-7a8c79a44f47"}]': finished 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.278944+0000 mon.vm06 (mon.0) 402 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "549138af-e58f-4394-97f4-7a8c79a44f47"}]': finished 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: cluster 2026-04-15T13:34:40.280259+0000 mon.vm06 (mon.0) 403 : cluster [DBG] osdmap e8: 3 total, 0 up, 3 in 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: cluster 2026-04-15T13:34:40.280259+0000 mon.vm06 (mon.0) 403 : cluster [DBG] osdmap e8: 3 total, 0 up, 3 in 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.280363+0000 mon.vm06 (mon.0) 404 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.280363+0000 mon.vm06 (mon.0) 404 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.280427+0000 mon.vm06 (mon.0) 405 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.280427+0000 mon.vm06 (mon.0) 405 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.280481+0000 mon.vm06 (mon.0) 406 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.280481+0000 mon.vm06 (mon.0) 406 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.316752+0000 mon.vm06 (mon.0) 407 : audit [INF] from='client.? 192.168.123.106:0/1313010770' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "dd7c0f1b-de8e-46ef-adf1-1743157ee826"} : dispatch 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.316752+0000 mon.vm06 (mon.0) 407 : audit [INF] from='client.? 192.168.123.106:0/1313010770' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "dd7c0f1b-de8e-46ef-adf1-1743157ee826"} : dispatch 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.319098+0000 mon.vm06 (mon.0) 408 : audit [INF] from='client.? 
192.168.123.106:0/1313010770' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "dd7c0f1b-de8e-46ef-adf1-1743157ee826"}]': finished 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.319098+0000 mon.vm06 (mon.0) 408 : audit [INF] from='client.? 192.168.123.106:0/1313010770' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "dd7c0f1b-de8e-46ef-adf1-1743157ee826"}]': finished 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: cluster 2026-04-15T13:34:40.320438+0000 mon.vm06 (mon.0) 409 : cluster [DBG] osdmap e9: 4 total, 0 up, 4 in 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: cluster 2026-04-15T13:34:40.320438+0000 mon.vm06 (mon.0) 409 : cluster [DBG] osdmap e9: 4 total, 0 up, 4 in 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.320524+0000 mon.vm06 (mon.0) 410 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.320524+0000 mon.vm06 (mon.0) 410 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.320593+0000 mon.vm06 (mon.0) 411 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.320593+0000 mon.vm06 (mon.0) 411 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.320651+0000 mon.vm06 (mon.0) 412 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.320651+0000 mon.vm06 (mon.0) 412 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.320715+0000 mon.vm06 (mon.0) 413 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T13:34:41.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.320715+0000 mon.vm06 (mon.0) 413 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T13:34:41.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.839078+0000 mon.vm06 (mon.0) 414 : audit [DBG] from='client.? 
2026-04-15T13:34:41.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:40.997669+0000 mon.vm09 (mon.1) 5 : audit [DBG] from='client.? 192.168.123.109:0/2331672038' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch
2026-04-15T13:34:41.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:41 vm06 bash[28114]: audit 2026-04-15T13:34:41.081315+0000 mon.vm06 (mon.0) 415 : audit [DBG] from='client.? 192.168.123.106:0/386409973' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch
2026-04-15T13:34:41.903 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd stat -f json
2026-04-15T13:34:42.173 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:34:42.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:42 vm09 bash[34466]: audit 2026-04-15T13:34:41.999644+0000 mon.vm09 (mon.1) 6 : audit [INF] from='client.? 192.168.123.109:0/2232215897' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "9acb1f73-ff77-43b9-839d-40df5a4f00f9"} : dispatch
2026-04-15T13:34:42.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:42 vm09 bash[34466]: audit 2026-04-15T13:34:42.001893+0000 mon.vm06 (mon.0) 416 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "9acb1f73-ff77-43b9-839d-40df5a4f00f9"} : dispatch
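The DEBUG:teuthology.orchestra.run.vm06 line above is the harness probing cluster state: it runs `ceph osd stat -f json` inside a one-off `cephadm shell` and parses the JSON printed on stdout. A minimal stand-alone sketch of that probe in Python (a hypothetical helper, not teuthology code; the cephadm path, image, fsid, and keyring path are copied from the command line above and are specific to this run):

    import json
    import subprocess

    CEPHADM = "/home/ubuntu/cephtest/cephadm"
    IMAGE = "harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5"
    FSID = "75e42418-38cf-11f1-9300-4fe77ac4445b"

    def osd_stat():
        # One-shot `cephadm shell` wrapping a single `ceph osd stat -f json`,
        # mirroring the DEBUG line above.
        out = subprocess.check_output([
            "sudo", CEPHADM, "--image", IMAGE, "shell",
            "-c", "/etc/ceph/ceph.conf",
            "-k", "/etc/ceph/ceph.client.admin.keyring",
            "--fsid", FSID,
            "--", "ceph", "osd", "stat", "-f", "json",
        ])
        # stdout carries a blank line before the JSON document (visible in the
        # log output below), so strip before parsing.
        return json.loads(out.strip())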
2026-04-15T13:34:42.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:42 vm09 bash[34466]: audit 2026-04-15T13:34:42.004721+0000 mon.vm06 (mon.0) 417 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "9acb1f73-ff77-43b9-839d-40df5a4f00f9"}]': finished
2026-04-15T13:34:42.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:42 vm09 bash[34466]: cluster 2026-04-15T13:34:42.006857+0000 mon.vm06 (mon.0) 418 : cluster [DBG] osdmap e10: 5 total, 0 up, 5 in
2026-04-15T13:34:42.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:42 vm09 bash[34466]: audit 2026-04-15T13:34:42.007018+0000 mon.vm06 (mon.0) 419 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-15T13:34:42.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:42 vm09 bash[34466]: audit 2026-04-15T13:34:42.007138+0000 mon.vm06 (mon.0) 420 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-15T13:34:42.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:42 vm09 bash[34466]: audit 2026-04-15T13:34:42.007236+0000 mon.vm06 (mon.0) 421 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-15T13:34:42.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:42 vm09 bash[34466]: audit 2026-04-15T13:34:42.007332+0000 mon.vm06 (mon.0) 422 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-15T13:34:42.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:42 vm09 bash[34466]: audit 2026-04-15T13:34:42.007425+0000 mon.vm06 (mon.0) 423 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-15T13:34:42.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:42 vm09 bash[34466]: audit 2026-04-15T13:34:42.159852+0000 mon.vm06 (mon.0) 424 : audit [INF] from='client.? 192.168.123.106:0/76401302' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "0c8f1316-78b2-4ecc-a5cc-29a4a232978f"} : dispatch
2026-04-15T13:34:42.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:42 vm09 bash[34466]: audit 2026-04-15T13:34:42.168838+0000 mon.vm06 (mon.0) 425 : audit [INF] from='client.? 192.168.123.106:0/76401302' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "0c8f1316-78b2-4ecc-a5cc-29a4a232978f"}]': finished
2026-04-15T13:34:42.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:42 vm09 bash[34466]: cluster 2026-04-15T13:34:42.170818+0000 mon.vm06 (mon.0) 426 : cluster [DBG] osdmap e11: 6 total, 0 up, 6 in
2026-04-15T13:34:42.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:42 vm09 bash[34466]: audit 2026-04-15T13:34:42.170949+0000 mon.vm06 (mon.0) 427 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-15T13:34:42.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:42 vm09 bash[34466]: audit 2026-04-15T13:34:42.171055+0000 mon.vm06 (mon.0) 428 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-15T13:34:42.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:42 vm09 bash[34466]: audit 2026-04-15T13:34:42.171120+0000 mon.vm06 (mon.0) 429 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-15T13:34:42.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:42 vm09 bash[34466]: audit 2026-04-15T13:34:42.171180+0000 mon.vm06 (mon.0) 430 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-15T13:34:42.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:42 vm09 bash[34466]: audit 2026-04-15T13:34:42.171244+0000 mon.vm06 (mon.0) 431 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-15T13:34:42.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:42 vm09 bash[34466]: audit 2026-04-15T13:34:42.171307+0000 mon.vm06 (mon.0) 432 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-15T13:34:42.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:42 vm06 bash[28114]: audit 2026-04-15T13:34:41.999644+0000 mon.vm09 (mon.1) 6 : audit [INF] from='client.? 192.168.123.109:0/2232215897' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "9acb1f73-ff77-43b9-839d-40df5a4f00f9"} : dispatch
2026-04-15T13:34:42.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:42 vm06 bash[28114]: audit 2026-04-15T13:34:42.001893+0000 mon.vm06 (mon.0) 416 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "9acb1f73-ff77-43b9-839d-40df5a4f00f9"} : dispatch
2026-04-15T13:34:42.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:42 vm06 bash[28114]: audit 2026-04-15T13:34:42.004721+0000 mon.vm06 (mon.0) 417 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "9acb1f73-ff77-43b9-839d-40df5a4f00f9"}]': finished
2026-04-15T13:34:42.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:42 vm06 bash[28114]: cluster 2026-04-15T13:34:42.006857+0000 mon.vm06 (mon.0) 418 : cluster [DBG] osdmap e10: 5 total, 0 up, 5 in
2026-04-15T13:34:42.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:42 vm06 bash[28114]: audit 2026-04-15T13:34:42.007018+0000 mon.vm06 (mon.0) 419 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-15T13:34:42.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:42 vm06 bash[28114]: audit 2026-04-15T13:34:42.007138+0000 mon.vm06 (mon.0) 420 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-15T13:34:42.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:42 vm06 bash[28114]: audit 2026-04-15T13:34:42.007236+0000 mon.vm06 (mon.0) 421 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-15T13:34:42.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:42 vm06 bash[28114]: audit 2026-04-15T13:34:42.007332+0000 mon.vm06 (mon.0) 422 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-15T13:34:42.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:42 vm06 bash[28114]: audit 2026-04-15T13:34:42.007425+0000 mon.vm06 (mon.0) 423 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-15T13:34:42.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:42 vm06 bash[28114]: audit 2026-04-15T13:34:42.159852+0000 mon.vm06 (mon.0) 424 : audit [INF] from='client.? 192.168.123.106:0/76401302' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "0c8f1316-78b2-4ecc-a5cc-29a4a232978f"} : dispatch
2026-04-15T13:34:42.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:42 vm06 bash[28114]: audit 2026-04-15T13:34:42.168838+0000 mon.vm06 (mon.0) 425 : audit [INF] from='client.? 192.168.123.106:0/76401302' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "0c8f1316-78b2-4ecc-a5cc-29a4a232978f"}]': finished
2026-04-15T13:34:42.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:42 vm06 bash[28114]: cluster 2026-04-15T13:34:42.170818+0000 mon.vm06 (mon.0) 426 : cluster [DBG] osdmap e11: 6 total, 0 up, 6 in
2026-04-15T13:34:42.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:42 vm06 bash[28114]: audit 2026-04-15T13:34:42.170949+0000 mon.vm06 (mon.0) 427 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-15T13:34:42.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:42 vm06 bash[28114]: audit 2026-04-15T13:34:42.171055+0000 mon.vm06 (mon.0) 428 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-15T13:34:42.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:42 vm06 bash[28114]: audit 2026-04-15T13:34:42.171120+0000 mon.vm06 (mon.0) 429 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-15T13:34:42.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:42 vm06 bash[28114]: audit 2026-04-15T13:34:42.171180+0000 mon.vm06 (mon.0) 430 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-15T13:34:42.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:42 vm06 bash[28114]: audit 2026-04-15T13:34:42.171244+0000 mon.vm06 (mon.0) 431 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-15T13:34:42.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:42 vm06 bash[28114]: audit 2026-04-15T13:34:42.171307+0000 mon.vm06 (mon.0) 432 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-15T13:34:42.581 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:34:42.667 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":11,"num_osds":6,"num_up_osds":0,"osd_up_since":0,"num_in_osds":6,"osd_in_since":1776260082,"num_remapped_pgs":0}
2026-04-15T13:34:43.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:43 vm06 bash[28114]: cluster 2026-04-15T13:34:42.167409+0000 mgr.vm06.qbbldl (mgr.14229) 68 : cluster [DBG] pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:34:43.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:43 vm06 bash[28114]: audit 2026-04-15T13:34:42.581905+0000 mon.vm06 (mon.0) 433 : audit [DBG] from='client.? 192.168.123.106:0/2117408063' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
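Each probe returns a document like the {"epoch":11,...} line above: num_osds counts the OSDs present in the osdmap, num_up_osds how many have actually booted. At this point the osdmap is still growing (e8 through e11) while num_up_osds stays 0, so the harness keeps waiting. A tiny predicate over the parsed document (hypothetical helper, matching only the fields visible in the output above):

    def all_osds_up(stat, expected):
        # `stat` is the parsed `ceph osd stat -f json` output, e.g.
        # {"epoch": 11, "num_osds": 6, "num_up_osds": 0, ...}
        # Ready once every expected OSD exists and all existing OSDs are up.
        return stat["num_osds"] >= expected and stat["num_up_osds"] == stat["num_osds"]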
2026-04-15T13:34:43.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:43 vm06 bash[28114]: audit 2026-04-15T13:34:42.726702+0000 mon.vm09 (mon.1) 7 : audit [DBG] from='client.? 192.168.123.109:0/1979643554' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch
2026-04-15T13:34:43.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:43 vm06 bash[28114]: audit 2026-04-15T13:34:42.901676+0000 mon.vm06 (mon.0) 434 : audit [DBG] from='client.? 192.168.123.106:0/3244060237' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch
2026-04-15T13:34:43.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:43 vm09 bash[34466]: cluster 2026-04-15T13:34:42.167409+0000 mgr.vm06.qbbldl (mgr.14229) 68 : cluster [DBG] pgmap v15: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:34:43.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:43 vm09 bash[34466]: audit 2026-04-15T13:34:42.581905+0000 mon.vm06 (mon.0) 433 : audit [DBG] from='client.? 192.168.123.106:0/2117408063' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-15T13:34:43.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:43 vm09 bash[34466]: audit 2026-04-15T13:34:42.726702+0000 mon.vm09 (mon.1) 7 : audit [DBG] from='client.? 192.168.123.109:0/1979643554' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch
2026-04-15T13:34:43.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:43 vm09 bash[34466]: audit 2026-04-15T13:34:42.901676+0000 mon.vm06 (mon.0) 434 : audit [DBG] from='client.? 192.168.123.106:0/3244060237' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch
2026-04-15T13:34:43.668 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd stat -f json
2026-04-15T13:34:43.930 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:34:44.302 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:34:44.365 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1776260083,"num_remapped_pgs":0}
2026-04-15T13:34:44.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: audit 2026-04-15T13:34:43.752986+0000 mon.vm09 (mon.1) 8 : audit [INF] from='client.? 192.168.123.109:0/667363886' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "1eeffe1e-e81f-4304-8758-28d246c7bbc1"} : dispatch
2026-04-15T13:34:44.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: audit 2026-04-15T13:34:43.755242+0000 mon.vm06 (mon.0) 435 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "1eeffe1e-e81f-4304-8758-28d246c7bbc1"} : dispatch
2026-04-15T13:34:44.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: audit 2026-04-15T13:34:43.758353+0000 mon.vm06 (mon.0) 436 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "1eeffe1e-e81f-4304-8758-28d246c7bbc1"}]': finished
2026-04-15T13:34:44.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: cluster 2026-04-15T13:34:43.760300+0000 mon.vm06 (mon.0) 437 : cluster [DBG] osdmap e12: 7 total, 0 up, 7 in
2026-04-15T13:34:44.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: audit 2026-04-15T13:34:43.761576+0000 mon.vm06 (mon.0) 438 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-15T13:34:44.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: audit 2026-04-15T13:34:43.761641+0000 mon.vm06 (mon.0) 439 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-15T13:34:44.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: audit 2026-04-15T13:34:43.761699+0000 mon.vm06 (mon.0) 440 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-15T13:34:44.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: audit 2026-04-15T13:34:43.761751+0000 mon.vm06 (mon.0) 441 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-15T13:34:44.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: audit 2026-04-15T13:34:43.761801+0000 mon.vm06 (mon.0) 442 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-15T13:34:44.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: audit 2026-04-15T13:34:43.761848+0000 mon.vm06 (mon.0) 443 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-15T13:34:44.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: audit 2026-04-15T13:34:43.761899+0000 mon.vm06 (mon.0) 444 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:44.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: audit 2026-04-15T13:34:43.989230+0000 mon.vm06 (mon.0) 445 : audit [INF] from='client.? 192.168.123.106:0/1591899919' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "2eefc3e2-2c3a-4ac3-846b-ab224d110bd8"} : dispatch
2026-04-15T13:34:44.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: audit 2026-04-15T13:34:43.992546+0000 mon.vm06 (mon.0) 446 : audit [INF] from='client.? 192.168.123.106:0/1591899919' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "2eefc3e2-2c3a-4ac3-846b-ab224d110bd8"}]': finished
2026-04-15T13:34:44.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: cluster 2026-04-15T13:34:43.995526+0000 mon.vm06 (mon.0) 447 : cluster [DBG] osdmap e13: 8 total, 0 up, 8 in
2026-04-15T13:34:44.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: audit 2026-04-15T13:34:43.995679+0000 mon.vm06 (mon.0) 448 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-15T13:34:44.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: audit 2026-04-15T13:34:43.995801+0000 mon.vm06 (mon.0) 449 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-15T13:34:44.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: audit 2026-04-15T13:34:43.995872+0000 mon.vm06 (mon.0) 450 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-15T13:34:44.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: audit 2026-04-15T13:34:43.995945+0000 mon.vm06 (mon.0) 451 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-15T13:34:44.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: audit 2026-04-15T13:34:43.996005+0000 mon.vm06 (mon.0) 452 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-15T13:34:44.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: audit 2026-04-15T13:34:43.996066+0000 mon.vm06 (mon.0) 453 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-15T13:34:44.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: audit 2026-04-15T13:34:43.996127+0000 mon.vm06 (mon.0) 454 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:44.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:44 vm09 bash[34466]: audit 2026-04-15T13:34:43.996193+0000 mon.vm06 (mon.0) 455 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-15T13:34:44.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: audit 2026-04-15T13:34:43.752986+0000 mon.vm09 (mon.1) 8 : audit [INF] from='client.? 192.168.123.109:0/667363886' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "1eeffe1e-e81f-4304-8758-28d246c7bbc1"} : dispatch
2026-04-15T13:34:44.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: audit 2026-04-15T13:34:43.755242+0000 mon.vm06 (mon.0) 435 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "1eeffe1e-e81f-4304-8758-28d246c7bbc1"} : dispatch
2026-04-15T13:34:44.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: audit 2026-04-15T13:34:43.758353+0000 mon.vm06 (mon.0) 436 : audit [INF] from='client.? ' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "1eeffe1e-e81f-4304-8758-28d246c7bbc1"}]': finished
2026-04-15T13:34:44.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: cluster 2026-04-15T13:34:43.760300+0000 mon.vm06 (mon.0) 437 : cluster [DBG] osdmap e12: 7 total, 0 up, 7 in
2026-04-15T13:34:44.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: audit 2026-04-15T13:34:43.761576+0000 mon.vm06 (mon.0) 438 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-15T13:34:44.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: audit 2026-04-15T13:34:43.761641+0000 mon.vm06 (mon.0) 439 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-15T13:34:44.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: audit 2026-04-15T13:34:43.761699+0000 mon.vm06 (mon.0) 440 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-15T13:34:44.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: audit 2026-04-15T13:34:43.761751+0000 mon.vm06 (mon.0) 441 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-15T13:34:44.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: audit 2026-04-15T13:34:43.761801+0000 mon.vm06 (mon.0) 442 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-15T13:34:44.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: audit 2026-04-15T13:34:43.761848+0000 mon.vm06 (mon.0) 443 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-15T13:34:44.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: audit 2026-04-15T13:34:43.761899+0000 mon.vm06 (mon.0) 444 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:44.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: audit 2026-04-15T13:34:43.989230+0000 mon.vm06 (mon.0) 445 : audit [INF] from='client.? 192.168.123.106:0/1591899919' entity='client.bootstrap-osd' cmd={"prefix": "osd new", "uuid": "2eefc3e2-2c3a-4ac3-846b-ab224d110bd8"} : dispatch
2026-04-15T13:34:44.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: audit 2026-04-15T13:34:43.992546+0000 mon.vm06 (mon.0) 446 : audit [INF] from='client.? 192.168.123.106:0/1591899919' entity='client.bootstrap-osd' cmd='[{"prefix": "osd new", "uuid": "2eefc3e2-2c3a-4ac3-846b-ab224d110bd8"}]': finished
2026-04-15T13:34:44.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: cluster 2026-04-15T13:34:43.995526+0000 mon.vm06 (mon.0) 447 : cluster [DBG] osdmap e13: 8 total, 0 up, 8 in
2026-04-15T13:34:44.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: audit 2026-04-15T13:34:43.995679+0000 mon.vm06 (mon.0) 448 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-15T13:34:44.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: audit 2026-04-15T13:34:43.995801+0000 mon.vm06 (mon.0) 449 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-15T13:34:44.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: audit 2026-04-15T13:34:43.995872+0000 mon.vm06 (mon.0) 450 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-15T13:34:44.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: audit 2026-04-15T13:34:43.995945+0000 mon.vm06 (mon.0) 451 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-15T13:34:44.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: audit 2026-04-15T13:34:43.996005+0000 mon.vm06 (mon.0) 452 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-15T13:34:44.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: audit 2026-04-15T13:34:43.996066+0000 mon.vm06 (mon.0) 453 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-15T13:34:44.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: audit 2026-04-15T13:34:43.996127+0000 mon.vm06 (mon.0) 454 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:44.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:44 vm06 bash[28114]: audit 2026-04-15T13:34:43.996193+0000 mon.vm06 (mon.0) 455 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-15T13:34:45.366 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd stat -f json
2026-04-15T13:34:45.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:45 vm09 bash[34466]: cluster 2026-04-15T13:34:44.167581+0000 mgr.vm06.qbbldl (mgr.14229) 69 : cluster [DBG] pgmap v19: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:34:45.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:45 vm09 bash[34466]: audit 2026-04-15T13:34:44.303166+0000 mon.vm06 (mon.0) 456 : audit [DBG] from='client.? 192.168.123.106:0/355717655' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
192.168.123.109:0/2556749932' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T13:34:45.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:45 vm09 bash[34466]: audit 2026-04-15T13:34:44.526122+0000 mon.vm09 (mon.1) 9 : audit [DBG] from='client.? 192.168.123.109:0/2556749932' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T13:34:45.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:45 vm09 bash[34466]: audit 2026-04-15T13:34:44.729743+0000 mon.vm06 (mon.0) 457 : audit [DBG] from='client.? 192.168.123.106:0/2108748672' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T13:34:45.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:45 vm09 bash[34466]: audit 2026-04-15T13:34:44.729743+0000 mon.vm06 (mon.0) 457 : audit [DBG] from='client.? 192.168.123.106:0/2108748672' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T13:34:45.638 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:45 vm06 bash[28114]: cluster 2026-04-15T13:34:44.167581+0000 mgr.vm06.qbbldl (mgr.14229) 69 : cluster [DBG] pgmap v19: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:45.638 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:45 vm06 bash[28114]: cluster 2026-04-15T13:34:44.167581+0000 mgr.vm06.qbbldl (mgr.14229) 69 : cluster [DBG] pgmap v19: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:45.638 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:45 vm06 bash[28114]: audit 2026-04-15T13:34:44.303166+0000 mon.vm06 (mon.0) 456 : audit [DBG] from='client.? 192.168.123.106:0/355717655' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:45.638 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:45 vm06 bash[28114]: audit 2026-04-15T13:34:44.303166+0000 mon.vm06 (mon.0) 456 : audit [DBG] from='client.? 192.168.123.106:0/355717655' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:45.638 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:45 vm06 bash[28114]: audit 2026-04-15T13:34:44.526122+0000 mon.vm09 (mon.1) 9 : audit [DBG] from='client.? 192.168.123.109:0/2556749932' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T13:34:45.638 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:45 vm06 bash[28114]: audit 2026-04-15T13:34:44.526122+0000 mon.vm09 (mon.1) 9 : audit [DBG] from='client.? 192.168.123.109:0/2556749932' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T13:34:45.638 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:45 vm06 bash[28114]: audit 2026-04-15T13:34:44.729743+0000 mon.vm06 (mon.0) 457 : audit [DBG] from='client.? 192.168.123.106:0/2108748672' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T13:34:45.638 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:45 vm06 bash[28114]: audit 2026-04-15T13:34:44.729743+0000 mon.vm06 (mon.0) 457 : audit [DBG] from='client.? 
192.168.123.106:0/2108748672' entity='client.bootstrap-osd' cmd={"prefix": "mon getmap"} : dispatch 2026-04-15T13:34:45.654 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:34:46.087 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:34:46.148 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1776260083,"num_remapped_pgs":0} 2026-04-15T13:34:46.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:46 vm06 bash[28114]: audit 2026-04-15T13:34:46.079267+0000 mon.vm06 (mon.0) 458 : audit [DBG] from='client.? 192.168.123.106:0/334487800' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:46.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:46 vm06 bash[28114]: audit 2026-04-15T13:34:46.079267+0000 mon.vm06 (mon.0) 458 : audit [DBG] from='client.? 192.168.123.106:0/334487800' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:46.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:46 vm09 bash[34466]: audit 2026-04-15T13:34:46.079267+0000 mon.vm06 (mon.0) 458 : audit [DBG] from='client.? 192.168.123.106:0/334487800' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:46.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:46 vm09 bash[34466]: audit 2026-04-15T13:34:46.079267+0000 mon.vm06 (mon.0) 458 : audit [DBG] from='client.? 192.168.123.106:0/334487800' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:47.149 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd stat -f json 2026-04-15T13:34:47.459 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:34:47.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:47 vm06 bash[28114]: cluster 2026-04-15T13:34:46.167761+0000 mgr.vm06.qbbldl (mgr.14229) 70 : cluster [DBG] pgmap v20: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:47.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:47 vm06 bash[28114]: cluster 2026-04-15T13:34:46.167761+0000 mgr.vm06.qbbldl (mgr.14229) 70 : cluster [DBG] pgmap v20: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:47.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:47 vm06 bash[28114]: audit 2026-04-15T13:34:46.463057+0000 mon.vm06 (mon.0) 459 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "osd.0"} : dispatch 2026-04-15T13:34:47.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:47 vm06 bash[28114]: audit 2026-04-15T13:34:46.463057+0000 mon.vm06 (mon.0) 459 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "osd.0"} : dispatch 2026-04-15T13:34:47.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:47 vm06 bash[28114]: audit 2026-04-15T13:34:46.463643+0000 mon.vm06 (mon.0) 460 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 
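The repeated `ceph osd stat -f json` invocations above are the runner polling until all eight OSDs report up (num_up_osds is still 0 at this point). A minimal sketch of such a polling loop, not teuthology's actual implementation, using the image and fsid from this run and hypothetical helper names:

    import json
    import subprocess
    import time

    IMAGE = "harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5"
    FSID = "75e42418-38cf-11f1-9300-4fe77ac4445b"

    def osd_stat():
        # Run `ceph osd stat -f json` inside a cephadm shell, as the log does,
        # and parse the JSON document printed on stdout.
        out = subprocess.check_output([
            "sudo", "/home/ubuntu/cephtest/cephadm", "--image", IMAGE,
            "shell", "-c", "/etc/ceph/ceph.conf",
            "-k", "/etc/ceph/ceph.client.admin.keyring",
            "--fsid", FSID, "--", "ceph", "osd", "stat", "-f", "json",
        ], text=True)
        return json.loads(out)

    def wait_for_osds_up(timeout=300, interval=2):
        # Poll until every OSD is up; the keys match the JSON logged above.
        deadline = time.time() + timeout
        while time.time() < deadline:
            stat = osd_stat()
            if stat["num_osds"] > 0 and stat["num_up_osds"] == stat["num_osds"]:
                return stat
            time.sleep(interval)
        raise TimeoutError("OSDs did not come up within %ds" % timeout)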
2026-04-15T13:34:47.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:47 vm06 bash[28114]: audit 2026-04-15T13:34:46.811911+0000 mon.vm06 (mon.0) 461 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "osd.1"} : dispatch
2026-04-15T13:34:47.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:47 vm06 bash[28114]: audit 2026-04-15T13:34:46.812469+0000 mon.vm06 (mon.0) 462 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:47.556 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:47 vm09 bash[34466]: cluster 2026-04-15T13:34:46.167761+0000 mgr.vm06.qbbldl (mgr.14229) 70 : cluster [DBG] pgmap v20: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:34:47.556 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:47 vm09 bash[34466]: audit 2026-04-15T13:34:46.463057+0000 mon.vm06 (mon.0) 459 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "osd.0"} : dispatch
2026-04-15T13:34:47.556 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:47 vm09 bash[34466]: audit 2026-04-15T13:34:46.463643+0000 mon.vm06 (mon.0) 460 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:47.556 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:47 vm09 bash[34466]: audit 2026-04-15T13:34:46.811911+0000 mon.vm06 (mon.0) 461 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "osd.1"} : dispatch
2026-04-15T13:34:47.556 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:47 vm09 bash[34466]: audit 2026-04-15T13:34:46.812469+0000 mon.vm06 (mon.0) 462 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:47.826 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:34:47.847 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:47 vm09 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:34:48.103 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:47 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:34:48.103 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1776260083,"num_remapped_pgs":0}
2026-04-15T13:34:48.361 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:48 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
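The KillMode warning recurs because systemd re-evaluates the cephadm unit template (ceph-<fsid>@.service) each time another daemon instance starts, and line 23 of that template sets KillMode=none; the warning itself names the fix ('mixed' or 'control-group'). A small diagnostic sketch, not part of teuthology, that locates the deprecated setting in unit files matching the one named above:

    import glob

    def find_killmode_none(unit_glob="/etc/systemd/system/ceph-*@.service"):
        # Return (path, line_number) pairs where KillMode=none is configured.
        hits = []
        for path in glob.glob(unit_glob):
            with open(path) as f:
                for lineno, line in enumerate(f, start=1):
                    if line.strip() == "KillMode=none":
                        hits.append((path, lineno))
        return hits

    if __name__ == "__main__":
        for path, lineno in find_killmode_none():
            print("%s:%d: KillMode=none (deprecated; prefer 'mixed')" % (path, lineno))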
2026-04-15T13:34:48.361 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:48 vm06 bash[28114]: cephadm 2026-04-15T13:34:46.464190+0000 mgr.vm06.qbbldl (mgr.14229) 71 : cephadm [INF] Deploying daemon osd.0 on vm09
2026-04-15T13:34:48.361 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:48 vm06 bash[28114]: cephadm 2026-04-15T13:34:46.812966+0000 mgr.vm06.qbbldl (mgr.14229) 72 : cephadm [INF] Deploying daemon osd.1 on vm06
2026-04-15T13:34:48.361 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:48 vm06 bash[28114]: audit 2026-04-15T13:34:47.824311+0000 mon.vm06 (mon.0) 463 : audit [DBG] from='client.? 192.168.123.106:0/401153158' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-15T13:34:48.361 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:48 vm06 bash[28114]: audit 2026-04-15T13:34:47.876684+0000 mon.vm06 (mon.0) 464 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:48.361 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:48 vm06 bash[28114]: audit 2026-04-15T13:34:47.880788+0000 mon.vm06 (mon.0) 465 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:48.361 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:48 vm06 bash[28114]: audit 2026-04-15T13:34:47.881385+0000 mon.vm06 (mon.0) 466 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "osd.2"} : dispatch
2026-04-15T13:34:48.361 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:48 vm06 bash[28114]: audit 2026-04-15T13:34:47.881846+0000 mon.vm06 (mon.0) 467 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:48.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:48 vm09 bash[34466]: cephadm 2026-04-15T13:34:46.464190+0000 mgr.vm06.qbbldl (mgr.14229) 71 : cephadm [INF] Deploying daemon osd.0 on vm09
2026-04-15T13:34:48.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:48 vm09 bash[34466]: cephadm 2026-04-15T13:34:46.812966+0000 mgr.vm06.qbbldl (mgr.14229) 72 : cephadm [INF] Deploying daemon osd.1 on vm06
2026-04-15T13:34:48.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:48 vm09 bash[34466]: audit 2026-04-15T13:34:47.824311+0000 mon.vm06 (mon.0) 463 : audit [DBG] from='client.? 192.168.123.106:0/401153158' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-15T13:34:48.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:48 vm09 bash[34466]: audit 2026-04-15T13:34:47.876684+0000 mon.vm06 (mon.0) 464 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:48.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:48 vm09 bash[34466]: audit 2026-04-15T13:34:47.880788+0000 mon.vm06 (mon.0) 465 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:48.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:48 vm09 bash[34466]: audit 2026-04-15T13:34:47.881385+0000 mon.vm06 (mon.0) 466 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "osd.2"} : dispatch
2026-04-15T13:34:48.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:48 vm09 bash[34466]: audit 2026-04-15T13:34:47.881846+0000 mon.vm06 (mon.0) 467 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:49.105 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd stat -f json
2026-04-15T13:34:49.318 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:49 vm09 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:34:49.407 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:34:49.416 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:49 vm06 bash[28114]: cephadm 2026-04-15T13:34:47.882226+0000 mgr.vm06.qbbldl (mgr.14229) 73 : cephadm [INF] Deploying daemon osd.2 on vm09
2026-04-15T13:34:49.416 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:49 vm06 bash[28114]: cluster 2026-04-15T13:34:48.167994+0000 mgr.vm06.qbbldl (mgr.14229) 74 : cluster [DBG] pgmap v21: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:34:49.416 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:49 vm06 bash[28114]: audit 2026-04-15T13:34:48.316713+0000 mon.vm06 (mon.0) 468 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:49.416 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:49 vm06 bash[28114]: audit 2026-04-15T13:34:48.322844+0000 mon.vm06 (mon.0) 469 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:49.416 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:49 vm06 bash[28114]: audit 2026-04-15T13:34:48.323715+0000 mon.vm06 (mon.0) 470 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "osd.3"} : dispatch
2026-04-15T13:34:49.416 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:49 vm06 bash[28114]: audit 2026-04-15T13:34:48.324267+0000 mon.vm06 (mon.0) 471 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:49.416 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:49 vm06 bash[28114]: cephadm 2026-04-15T13:34:48.327530+0000 mgr.vm06.qbbldl (mgr.14229) 75 : cephadm [INF] Deploying daemon osd.3 on vm06
2026-04-15T13:34:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:49 vm09 bash[34466]: cephadm 2026-04-15T13:34:47.882226+0000 mgr.vm06.qbbldl (mgr.14229) 73 : cephadm [INF] Deploying daemon osd.2 on vm09
2026-04-15T13:34:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:49 vm09 bash[34466]: cluster 2026-04-15T13:34:48.167994+0000 mgr.vm06.qbbldl (mgr.14229) 74 : cluster [DBG] pgmap v21: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:34:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:49 vm09 bash[34466]: audit 2026-04-15T13:34:48.316713+0000 mon.vm06 (mon.0) 468 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:49 vm09 bash[34466]: audit 2026-04-15T13:34:48.322844+0000 mon.vm06 (mon.0) 469 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:49 vm09 bash[34466]: audit 2026-04-15T13:34:48.323715+0000 mon.vm06 (mon.0) 470 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "osd.3"} : dispatch
2026-04-15T13:34:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:49 vm09 bash[34466]: audit 2026-04-15T13:34:48.324267+0000 mon.vm06 (mon.0) 471 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:49 vm09 bash[34466]: cephadm 2026-04-15T13:34:48.327530+0000 mgr.vm06.qbbldl (mgr.14229) 75 : cephadm [INF] Deploying daemon osd.3 on vm06
2026-04-15T13:34:49.761 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:49 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:34:50.179 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:49 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
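By this point the mgr has logged "Deploying daemon osd.N" for osd.0 through osd.3, alternating between vm09 and vm06. Progress of such a rollout could be watched with `ceph orch ps` (the same command this suite's cephadm.shell steps run); a sketch, assuming the daemon_type/daemon_id/status_desc fields of the orchestrator's JSON output:

    import json
    import subprocess
    import time

    def running_osds():
        # List OSD daemons via `ceph orch ps` and keep those reported running.
        out = subprocess.check_output(
            ["ceph", "orch", "ps", "--daemon-type", "osd", "--format", "json"],
            text=True)
        return ["%s.%s" % (d["daemon_type"], d["daemon_id"])
                for d in json.loads(out) if d.get("status_desc") == "running"]

    def wait_for_osd_daemons(expected=8, timeout=600, interval=5):
        # Poll until `expected` OSD daemons report running, or time out.
        deadline = time.time() + timeout
        while time.time() < deadline:
            up = running_osds()
            if len(up) >= expected:
                return up
            time.sleep(interval)
        raise TimeoutError("expected %d running OSD daemons" % expected)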
2026-04-15T13:34:50.186 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:34:50.286 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":13,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1776260083,"num_remapped_pgs":0}
2026-04-15T13:34:50.477 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:50 vm06 bash[28114]: audit 2026-04-15T13:34:49.386015+0000 mon.vm06 (mon.0) 472 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:50.477 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:50 vm06 bash[28114]: audit 2026-04-15T13:34:49.395369+0000 mon.vm06 (mon.0) 473 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:50.477 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:50 vm06 bash[28114]: audit 2026-04-15T13:34:49.396321+0000 mon.vm06 (mon.0) 474 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "osd.4"} : dispatch
2026-04-15T13:34:50.477 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:50 vm06 bash[28114]: audit 2026-04-15T13:34:49.397078+0000 mon.vm06 (mon.0) 475 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:50.477 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:50 vm06 bash[28114]: cephadm 2026-04-15T13:34:49.397456+0000 mgr.vm06.qbbldl (mgr.14229) 76 : cephadm [INF] Deploying daemon osd.4 on vm09
2026-04-15T13:34:50.477 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:50 vm06 bash[28114]: audit 2026-04-15T13:34:49.894256+0000 mon.vm06 (mon.0) 476 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:50.477 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:50 vm06 bash[28114]: audit 2026-04-15T13:34:49.899143+0000 mon.vm06 (mon.0) 477 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:50.477 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:50 vm06 bash[28114]: audit 2026-04-15T13:34:49.900164+0000 mon.vm06 (mon.0) 478 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "osd.5"} : dispatch
2026-04-15T13:34:50.477 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:50 vm06 bash[28114]: audit 2026-04-15T13:34:49.901333+0000 mon.vm06 (mon.0) 479 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:50.477 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:50 vm06 bash[28114]: cephadm 2026-04-15T13:34:49.902259+0000 mgr.vm06.qbbldl (mgr.14229) 77 : cephadm [INF] Deploying daemon osd.5 on vm06
2026-04-15T13:34:50.477 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:50 vm06 bash[28114]: cluster 2026-04-15T13:34:50.168246+0000 mgr.vm06.qbbldl (mgr.14229) 78 : cluster [DBG] pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:34:50.477 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:50 vm06 bash[28114]: audit 2026-04-15T13:34:50.184414+0000 mon.vm06 (mon.0) 480 : audit [DBG] from='client.? 192.168.123.106:0/1007400221' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-15T13:34:50.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:50 vm09 bash[34466]: audit 2026-04-15T13:34:49.386015+0000 mon.vm06 (mon.0) 472 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:50.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:50 vm09 bash[34466]: audit 2026-04-15T13:34:49.395369+0000 mon.vm06 (mon.0) 473 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:50.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:50 vm09 bash[34466]: audit 2026-04-15T13:34:49.396321+0000 mon.vm06 (mon.0) 474 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "osd.4"} : dispatch
2026-04-15T13:34:50.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:50 vm09 bash[34466]: audit 2026-04-15T13:34:49.397078+0000 mon.vm06 (mon.0) 475 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:50.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:50 vm09 bash[34466]: cephadm 2026-04-15T13:34:49.397456+0000 mgr.vm06.qbbldl (mgr.14229) 76 : cephadm [INF] Deploying daemon osd.4 on vm09
2026-04-15T13:34:50.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:50 vm09 bash[34466]: audit 2026-04-15T13:34:49.894256+0000 mon.vm06 (mon.0) 476 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:50.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:50 vm09 bash[34466]: audit 2026-04-15T13:34:49.899143+0000 mon.vm06 (mon.0) 477 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:50.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:50 vm09 bash[34466]: audit 2026-04-15T13:34:49.900164+0000 mon.vm06 (mon.0) 478 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "osd.5"} : dispatch
2026-04-15T13:34:50.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:50 vm09 bash[34466]: audit 2026-04-15T13:34:49.901333+0000 mon.vm06 (mon.0) 479 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:50.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:50 vm09 bash[34466]: cephadm 2026-04-15T13:34:49.902259+0000 mgr.vm06.qbbldl (mgr.14229) 77 : cephadm [INF] Deploying daemon osd.5 on vm06
2026-04-15T13:34:50.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:50 vm09 bash[34466]: cluster 2026-04-15T13:34:50.168246+0000 mgr.vm06.qbbldl (mgr.14229) 78 : cluster [DBG] pgmap v22: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:34:50.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:50 vm09 bash[34466]: audit 2026-04-15T13:34:50.184414+0000 mon.vm06 (mon.0) 480 : audit [DBG] from='client.? 192.168.123.106:0/1007400221' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-15T13:34:50.949 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:50 vm09 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:34:51.288 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd stat -f json
2026-04-15T13:34:51.311 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:51 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:34:51.546 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:51 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:34:51.572 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:34:52.071 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:34:52.077 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:51 vm09 bash[34466]: audit 2026-04-15T13:34:50.984469+0000 mon.vm06 (mon.0) 481 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:52.078 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:51 vm09 bash[34466]: audit 2026-04-15T13:34:50.991509+0000 mon.vm06 (mon.0) 482 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:52.078 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:51 vm09 bash[34466]: audit 2026-04-15T13:34:50.992066+0000 mon.vm06 (mon.0) 483 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "osd.6"} : dispatch
2026-04-15T13:34:52.078 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:51 vm09 bash[34466]: audit 2026-04-15T13:34:50.992542+0000 mon.vm06 (mon.0) 484 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:52.078 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:51 vm09 bash[34466]: cephadm 2026-04-15T13:34:50.993939+0000 mgr.vm06.qbbldl (mgr.14229) 79 : cephadm [INF] Deploying daemon osd.6 on vm09
2026-04-15T13:34:52.078 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:51 vm09 bash[34466]: audit 2026-04-15T13:34:51.296497+0000 mon.vm09 (mon.1) 10 : audit [INF] from='osd.0 [v2:192.168.123.109:6800/1671722834,v1:192.168.123.109:6801/1671722834]' entity='osd.0' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["0"]} : dispatch
2026-04-15T13:34:52.078 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:51 vm09 bash[34466]: audit 2026-04-15T13:34:51.301406+0000 mon.vm06 (mon.0) 485 : audit [INF] from='osd.0 ' entity='osd.0' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["0"]} : dispatch
2026-04-15T13:34:52.078 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:51 vm09 bash[34466]: audit 2026-04-15T13:34:51.591730+0000 mon.vm06 (mon.0) 486 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:52.078 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:51 vm09 bash[34466]: audit 2026-04-15T13:34:51.596748+0000 mon.vm06 (mon.0) 487 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:52.078 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:51 vm09 bash[34466]: audit 2026-04-15T13:34:51.599570+0000 mon.vm06 (mon.0) 488 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "osd.7"} : dispatch
2026-04-15T13:34:52.078 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:51 vm09 bash[34466]: audit 2026-04-15T13:34:51.600089+0000 mon.vm06 (mon.0) 489 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:52.078 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:51 vm09 bash[34466]: audit 2026-04-15T13:34:51.910069+0000 mon.vm09 (mon.1) 11 : audit [INF] from='osd.1 [v2:192.168.123.106:6802/462013235,v1:192.168.123.106:6803/462013235]' entity='osd.1' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["1"]} : dispatch
2026-04-15T13:34:52.078 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:51 vm09 bash[34466]: audit 2026-04-15T13:34:51.912326+0000 mon.vm06 (mon.0) 490 : audit [INF] from='osd.1 ' entity='osd.1' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["1"]} : dispatch
2026-04-15T13:34:52.118 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:51 vm06 bash[28114]: audit 2026-04-15T13:34:50.984469+0000 mon.vm06 (mon.0) 481 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:52.118 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:51 vm06 bash[28114]: audit 2026-04-15T13:34:50.991509+0000 mon.vm06 (mon.0) 482 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:52.118 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:51 vm06 bash[28114]: audit 2026-04-15T13:34:50.992066+0000 mon.vm06 (mon.0) 483 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "osd.6"} : dispatch
2026-04-15T13:34:52.118 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:51 vm06 bash[28114]: audit 2026-04-15T13:34:50.992542+0000 mon.vm06 (mon.0) 484 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:52.118 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:51 vm06 bash[28114]: cephadm 2026-04-15T13:34:50.993939+0000 mgr.vm06.qbbldl (mgr.14229) 79 : cephadm [INF] Deploying daemon osd.6 on vm09
2026-04-15T13:34:52.119 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:51 vm06 bash[28114]: audit 2026-04-15T13:34:51.296497+0000 mon.vm09 (mon.1) 10 : audit [INF] from='osd.0 [v2:192.168.123.109:6800/1671722834,v1:192.168.123.109:6801/1671722834]' entity='osd.0' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["0"]} : dispatch
2026-04-15T13:34:52.119 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:51 vm06 bash[28114]: audit 2026-04-15T13:34:51.301406+0000 mon.vm06 (mon.0) 485 : audit [INF] from='osd.0 ' entity='osd.0' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["0"]} : dispatch
2026-04-15T13:34:52.119 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:51 vm06 bash[28114]: audit 2026-04-15T13:34:51.591730+0000 mon.vm06 (mon.0) 486 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:52.119 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:51 vm06 bash[28114]: audit 2026-04-15T13:34:51.596748+0000 mon.vm06 (mon.0) 487 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:52.119 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:51 vm06 bash[28114]: audit 2026-04-15T13:34:51.599570+0000 mon.vm06 (mon.0) 488 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "osd.7"} : dispatch
2026-04-15T13:34:52.119 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:51 vm06 bash[28114]: audit 2026-04-15T13:34:51.600089+0000 mon.vm06 (mon.0) 489 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:34:52.119 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:51 vm06 bash[28114]: audit 2026-04-15T13:34:51.910069+0000 mon.vm09 (mon.1) 11 : audit [INF] from='osd.1 [v2:192.168.123.106:6802/462013235,v1:192.168.123.106:6803/462013235]' entity='osd.1' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["1"]} : dispatch
2026-04-15T13:34:52.119 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:51 vm06 bash[28114]: audit 2026-04-15T13:34:51.912326+0000 mon.vm06 (mon.0) 490 : audit [INF] from='osd.1 ' entity='osd.1' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["1"]} : dispatch
2026-04-15T13:34:52.170 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":14,"num_osds":8,"num_up_osds":0,"osd_up_since":0,"num_in_osds":8,"osd_in_since":1776260083,"num_remapped_pgs":0}
2026-04-15T13:34:52.341 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:52 vm09 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:34:52.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:52 vm09 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
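The audit entries above show the first OSDs registering their device class at startup ('osd crush set-device-class', class ssd), and just below each follows up with 'osd crush create-or-move' to place itself under its host in the CRUSH tree, after which the osdmap epoch bumps (the osd stat output moves from epoch 13 to 14). The same two mon commands via the CLI, with values copied from the osd.0 entries, as an illustration only:

    import subprocess

    def register_osd_in_crush(osd_id=0, dev_class="ssd", weight=0.0195,
                              host="vm09", root="default"):
        # ceph osd crush set-device-class <class> <id>
        subprocess.check_call(
            ["ceph", "osd", "crush", "set-device-class", dev_class, str(osd_id)])
        # ceph osd crush create-or-move <id> <weight> host=<host> root=<root>
        subprocess.check_call(
            ["ceph", "osd", "crush", "create-or-move", str(osd_id), str(weight),
             "host=%s" % host, "root=%s" % root])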
2026-04-15T13:34:53.171 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd stat -f json
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:52 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: cephadm 2026-04-15T13:34:51.600535+0000 mgr.vm06.qbbldl (mgr.14229) 80 : cephadm [INF] Deploying daemon osd.7 on vm06
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: audit 2026-04-15T13:34:51.994054+0000 mon.vm06 (mon.0) 491 : audit [INF] from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["0"]}]': finished
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: audit 2026-04-15T13:34:51.994113+0000 mon.vm06 (mon.0) 492 : audit [INF] from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["1"]}]': finished
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: audit 2026-04-15T13:34:51.995310+0000 mon.vm09 (mon.1) 12 : audit [INF] from='osd.0 [v2:192.168.123.109:6800/1671722834,v1:192.168.123.109:6801/1671722834]' entity='osd.0' cmd={"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm09", "root=default"]} : dispatch
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: cluster 2026-04-15T13:34:51.995368+0000 mon.vm06 (mon.0) 493 : cluster [DBG] osdmap e14: 8 total, 0 up, 8 in
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: audit 2026-04-15T13:34:51.995492+0000 mon.vm09 (mon.1) 13 : audit [INF] from='osd.1 [v2:192.168.123.106:6802/462013235,v1:192.168.123.106:6803/462013235]' entity='osd.1' cmd={"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm06", "root=default"]} : dispatch
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: audit 2026-04-15T13:34:51.995582+0000 mon.vm06 (mon.0) 494 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: audit 2026-04-15T13:34:51.995699+0000 mon.vm06 (mon.0) 495 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: audit 2026-04-15T13:34:51.995776+0000 mon.vm06 (mon.0) 496 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: audit 2026-04-15T13:34:51.995847+0000 mon.vm06 (mon.0) 497 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: audit 2026-04-15T13:34:51.995934+0000 mon.vm06 (mon.0) 498 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: audit 2026-04-15T13:34:51.996006+0000 mon.vm06 (mon.0) 499 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: audit 2026-04-15T13:34:51.996078+0000 mon.vm06 (mon.0) 500 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: audit 2026-04-15T13:34:51.996152+0000 mon.vm06 (mon.0) 501 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: audit 2026-04-15T13:34:51.997531+0000 mon.vm06 (mon.0) 502 : audit [INF] from='osd.0 ' entity='osd.0' cmd={"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm09", "root=default"]} : dispatch
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: audit 2026-04-15T13:34:51.997656+0000 mon.vm06 (mon.0) 503 : audit [INF] from='osd.1 ' entity='osd.1' cmd={"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm06", "root=default"]} : dispatch
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: audit 2026-04-15T13:34:52.066482+0000 mon.vm09 (mon.1) 14 : audit [DBG] from='client.? 192.168.123.106:0/1849593236' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: cluster 2026-04-15T13:34:52.168878+0000 mgr.vm06.qbbldl (mgr.14229) 81 : cluster [DBG] pgmap v24: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: audit 2026-04-15T13:34:52.603891+0000 mon.vm06 (mon.0) 504 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: audit 2026-04-15T13:34:52.608734+0000 mon.vm06 (mon.0) 505 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: audit 2026-04-15T13:34:52.704953+0000 mon.vm09 (mon.1) 15 : audit [INF] from='osd.2 [v2:192.168.123.109:6808/1579250882,v1:192.168.123.109:6809/1579250882]' entity='osd.2' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["2"]} : dispatch
2026-04-15T13:34:53.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 bash[28114]: audit 2026-04-15T13:34:52.707393+0000 mon.vm06 (mon.0) 506 : audit [INF] from='osd.2 ' entity='osd.2' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["2"]} : dispatch
2026-04-15T13:34:53.202 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: cephadm 2026-04-15T13:34:51.600535+0000 mgr.vm06.qbbldl (mgr.14229) 80 : cephadm [INF] Deploying daemon osd.7 on vm06
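(The set-device-class/create-or-move pairs above are each newly deployed OSD registering itself in the CRUSH map as it starts: first its device class, then its placement and weight, the weight being the device capacity expressed in TiB. Issued by hand from a cephadm shell, the equivalent commands would look like the following sketch, using osd.2's values from this log purely for illustration:

  ceph osd crush set-device-class ssd osd.2
  ceph osd crush create-or-move osd.2 0.0195 host=vm09 root=default
)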
2026-04-15T13:34:53.215 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: audit 2026-04-15T13:34:51.994054+0000 mon.vm06 (mon.0) 491 : audit [INF] from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["0"]}]': finished
2026-04-15T13:34:53.215 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: audit 2026-04-15T13:34:51.994113+0000 mon.vm06 (mon.0) 492 : audit [INF] from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["1"]}]': finished
2026-04-15T13:34:53.215 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: audit 2026-04-15T13:34:51.995310+0000 mon.vm09 (mon.1) 12 : audit [INF] from='osd.0 [v2:192.168.123.109:6800/1671722834,v1:192.168.123.109:6801/1671722834]' entity='osd.0' cmd={"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm09", "root=default"]} : dispatch
2026-04-15T13:34:53.215 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: cluster 2026-04-15T13:34:51.995368+0000 mon.vm06 (mon.0) 493 : cluster [DBG] osdmap e14: 8 total, 0 up, 8 in
2026-04-15T13:34:53.215 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: audit 2026-04-15T13:34:51.995492+0000 mon.vm09 (mon.1) 13 : audit [INF] from='osd.1 [v2:192.168.123.106:6802/462013235,v1:192.168.123.106:6803/462013235]' entity='osd.1' cmd={"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm06", "root=default"]} : dispatch
2026-04-15T13:34:53.215 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: audit 2026-04-15T13:34:51.995582+0000 mon.vm06 (mon.0) 494 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-15T13:34:53.215 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: audit 2026-04-15T13:34:51.995699+0000 mon.vm06 (mon.0) 495 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-15T13:34:53.215 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: audit 2026-04-15T13:34:51.995776+0000 mon.vm06 (mon.0) 496 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-15T13:34:53.215 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: audit 2026-04-15T13:34:51.995847+0000 mon.vm06 (mon.0) 497 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-15T13:34:53.215 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: audit 2026-04-15T13:34:51.995934+0000 mon.vm06 (mon.0) 498 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-15T13:34:53.216 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: audit 2026-04-15T13:34:51.996006+0000 mon.vm06 (mon.0) 499 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-15T13:34:53.216 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: audit 2026-04-15T13:34:51.996078+0000 mon.vm06 (mon.0) 500 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:53.216 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: audit 2026-04-15T13:34:51.996152+0000 mon.vm06 (mon.0) 501 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-15T13:34:53.216 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: audit 2026-04-15T13:34:51.997531+0000 mon.vm06 (mon.0) 502 : audit [INF] from='osd.0 ' entity='osd.0' cmd={"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm09", "root=default"]} : dispatch
2026-04-15T13:34:53.216 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: audit 2026-04-15T13:34:51.997656+0000 mon.vm06 (mon.0) 503 : audit [INF] from='osd.1 ' entity='osd.1' cmd={"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm06", "root=default"]} : dispatch
2026-04-15T13:34:53.216 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: audit 2026-04-15T13:34:52.066482+0000 mon.vm09 (mon.1) 14 : audit [DBG] from='client.? 192.168.123.106:0/1849593236' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-15T13:34:53.216 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: cluster 2026-04-15T13:34:52.168878+0000 mgr.vm06.qbbldl (mgr.14229) 81 : cluster [DBG] pgmap v24: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:34:53.216 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: audit 2026-04-15T13:34:52.603891+0000 mon.vm06 (mon.0) 504 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:53.216 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: audit 2026-04-15T13:34:52.608734+0000 mon.vm06 (mon.0) 505 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:53.216 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: audit 2026-04-15T13:34:52.704953+0000 mon.vm09 (mon.1) 15 : audit [INF] from='osd.2 [v2:192.168.123.109:6808/1579250882,v1:192.168.123.109:6809/1579250882]' entity='osd.2' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["2"]} : dispatch
2026-04-15T13:34:53.216 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:53 vm09 bash[34466]: audit 2026-04-15T13:34:52.707393+0000 mon.vm06 (mon.0) 506 : audit [INF] from='osd.2 ' entity='osd.2' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["2"]} : dispatch
2026-04-15T13:34:53.467 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:34:53.498 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:53 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
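(The client.admin "osd stat" audit entries above, together with the cephadm shell invocation before them, are the harness polling `ceph osd stat -f json` until every deployed OSD reports up; the next poll below shows num_up_osds climbing from 0 toward num_osds as the first daemons boot. A minimal sketch of that kind of wait loop, assuming jq is available in the shell; this is not teuthology's actual implementation:

  timeout 300 bash -c 'until [ "$(ceph osd stat -f json | jq .num_up_osds)" -eq "$(ceph osd stat -f json | jq .num_osds)" ]; do sleep 5; done'
)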
2026-04-15T13:34:54.051 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:34:54.225 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":16,"num_osds":8,"num_up_osds":2,"osd_up_since":1776260093,"num_in_osds":8,"osd_in_since":1776260083,"num_remapped_pgs":0}
2026-04-15T13:34:54.279 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:52.996795+0000 mon.vm06 (mon.0) 507 : audit [INF] from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished
2026-04-15T13:34:54.279 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:52.996848+0000 mon.vm06 (mon.0) 508 : audit [INF] from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished
2026-04-15T13:34:54.279 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:52.996873+0000 mon.vm06 (mon.0) 509 : audit [INF] from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["2"]}]': finished
2026-04-15T13:34:54.279 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:52.997832+0000 mon.vm09 (mon.1) 16 : audit [INF] from='osd.2 [v2:192.168.123.109:6808/1579250882,v1:192.168.123.109:6809/1579250882]' entity='osd.2' cmd={"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm09", "root=default"]} : dispatch
2026-04-15T13:34:54.279 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: cluster 2026-04-15T13:34:52.998705+0000 mon.vm06 (mon.0) 510 : cluster [DBG] osdmap e15: 8 total, 0 up, 8 in
2026-04-15T13:34:54.279 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:53.003652+0000 mon.vm06 (mon.0) 511 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-15T13:34:54.279 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:53.003759+0000 mon.vm06 (mon.0) 512 : audit [INF] from='osd.2 ' entity='osd.2' cmd={"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm09", "root=default"]} : dispatch
2026-04-15T13:34:54.279 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:53.003856+0000 mon.vm06 (mon.0) 513 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-15T13:34:54.279 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:53.003963+0000 mon.vm06 (mon.0) 514 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:53.004021+0000 mon.vm06 (mon.0) 515 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:53.004064+0000 mon.vm06 (mon.0) 516 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:53.004106+0000 mon.vm06 (mon.0) 517 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:53.004147+0000 mon.vm06 (mon.0) 518 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:53.004190+0000 mon.vm06 (mon.0) 519 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:53.405075+0000 mon.vm06 (mon.0) 520 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:53.468642+0000 mon.vm06 (mon.0) 521 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:53.480893+0000 mon.vm06 (mon.0) 522 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:53.520000+0000 mon.vm06 (mon.0) 523 : audit [INF] from='osd.3 [v2:192.168.123.106:6810/2627160607,v1:192.168.123.106:6811/2627160607]' entity='osd.3' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["3"]} : dispatch
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:53.585753+0000 mon.vm06 (mon.0) 524 : audit [INF] from='osd.1 ' entity='osd.1'
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:54.002215+0000 mon.vm06 (mon.0) 525 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:54.003250+0000 mon.vm06 (mon.0) 526 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:54.004938+0000 mon.vm06 (mon.0) 527 : audit [INF] from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:54.004992+0000 mon.vm06 (mon.0) 528 : audit [INF] from='osd.3 [v2:192.168.123.106:6810/2627160607,v1:192.168.123.106:6811/2627160607]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["3"]}]': finished
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: cluster 2026-04-15T13:34:54.007057+0000 mon.vm06 (mon.0) 529 : cluster [INF] osd.0 [v2:192.168.123.109:6800/1671722834,v1:192.168.123.109:6801/1671722834] boot
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: cluster 2026-04-15T13:34:54.007089+0000 mon.vm06 (mon.0) 530 : cluster [INF] osd.1 [v2:192.168.123.106:6802/462013235,v1:192.168.123.106:6803/462013235] boot
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: cluster 2026-04-15T13:34:54.007108+0000 mon.vm06 (mon.0) 531 : cluster [DBG] osdmap e16: 8 total, 2 up, 8 in
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:54.007252+0000 mon.vm06 (mon.0) 532 : audit [INF] from='osd.3 [v2:192.168.123.106:6810/2627160607,v1:192.168.123.106:6811/2627160607]' entity='osd.3' cmd={"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm06", "root=default"]} : dispatch
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:54.007337+0000 mon.vm06 (mon.0) 533 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:54.007402+0000 mon.vm06 (mon.0) 534 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:54.007580+0000 mon.vm06 (mon.0) 535 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:54.007623+0000 mon.vm06 (mon.0) 536 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:54.007660+0000 mon.vm06 (mon.0) 537 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:54.007695+0000 mon.vm06 (mon.0) 538 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:54.007729+0000 mon.vm06 (mon.0) 539 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:54.280 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:54 vm06 bash[28114]: audit 2026-04-15T13:34:54.008018+0000 mon.vm06 (mon.0) 540 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-15T13:34:54.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:52.996795+0000 mon.vm06 (mon.0) 507 : audit [INF] from='osd.0 ' entity='osd.0' cmd='[{"prefix": "osd crush create-or-move", "id": 0, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished
2026-04-15T13:34:54.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:52.996848+0000 mon.vm06 (mon.0) 508 : audit [INF] from='osd.1 ' entity='osd.1' cmd='[{"prefix": "osd crush create-or-move", "id": 1, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:52.996873+0000 mon.vm06 (mon.0) 509 : audit [INF] from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["2"]}]': finished
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:52.997832+0000 mon.vm09 (mon.1) 16 : audit [INF] from='osd.2 [v2:192.168.123.109:6808/1579250882,v1:192.168.123.109:6809/1579250882]' entity='osd.2' cmd={"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm09", "root=default"]} : dispatch
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: cluster 2026-04-15T13:34:52.998705+0000 mon.vm06 (mon.0) 510 : cluster [DBG] osdmap e15: 8 total, 0 up, 8 in
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:53.003652+0000 mon.vm06 (mon.0) 511 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:53.003759+0000 mon.vm06 (mon.0) 512 : audit [INF] from='osd.2 ' entity='osd.2' cmd={"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm09", "root=default"]} : dispatch
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:53.003856+0000 mon.vm06 (mon.0) 513 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:53.003963+0000 mon.vm06 (mon.0) 514 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:53.004021+0000 mon.vm06 (mon.0) 515 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:53.004064+0000 mon.vm06 (mon.0) 516 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:53.004106+0000 mon.vm06 (mon.0) 517 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:53.004147+0000 mon.vm06 (mon.0) 518 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:53.004190+0000 mon.vm06 (mon.0) 519 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:53.405075+0000 mon.vm06 (mon.0) 520 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:53.468642+0000 mon.vm06 (mon.0) 521 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:53.480893+0000 mon.vm06 (mon.0) 522 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:53.520000+0000 mon.vm06 (mon.0) 523 : audit [INF] from='osd.3 [v2:192.168.123.106:6810/2627160607,v1:192.168.123.106:6811/2627160607]' entity='osd.3' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["3"]} : dispatch
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:53.585753+0000 mon.vm06 (mon.0) 524 : audit [INF] from='osd.1 ' entity='osd.1'
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:54.002215+0000 mon.vm06 (mon.0) 525 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:54.003250+0000 mon.vm06 (mon.0) 526 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:54.004938+0000 mon.vm06 (mon.0) 527 : audit [INF] from='osd.2 ' entity='osd.2' cmd='[{"prefix": "osd crush create-or-move", "id": 2, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:54.004992+0000 mon.vm06 (mon.0) 528 : audit [INF] from='osd.3 [v2:192.168.123.106:6810/2627160607,v1:192.168.123.106:6811/2627160607]' entity='osd.3' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["3"]}]': finished
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: cluster 2026-04-15T13:34:54.007057+0000 mon.vm06 (mon.0) 529 : cluster [INF] osd.0 [v2:192.168.123.109:6800/1671722834,v1:192.168.123.109:6801/1671722834] boot
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: cluster 2026-04-15T13:34:54.007089+0000 mon.vm06 (mon.0) 530 : cluster [INF] osd.1 [v2:192.168.123.106:6802/462013235,v1:192.168.123.106:6803/462013235] boot
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: cluster 2026-04-15T13:34:54.007108+0000 mon.vm06 (mon.0) 531 : cluster [DBG] osdmap e16: 8 total, 2 up, 8 in
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:54.007252+0000 mon.vm06 (mon.0) 532 : audit [INF] from='osd.3 [v2:192.168.123.106:6810/2627160607,v1:192.168.123.106:6811/2627160607]' entity='osd.3' cmd={"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm06", "root=default"]} : dispatch
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:54.007337+0000 mon.vm06 (mon.0) 533 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 0} : dispatch
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:54.007402+0000 mon.vm06 (mon.0) 534 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 1} : dispatch
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:54.007580+0000 mon.vm06 (mon.0) 535 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:54.007623+0000 mon.vm06 (mon.0) 536 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-15T13:34:54.360
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:54.007623+0000 mon.vm06 (mon.0) 536 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:54.007660+0000 mon.vm06 (mon.0) 537 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:54.007660+0000 mon.vm06 (mon.0) 537 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:54.007695+0000 mon.vm06 (mon.0) 538 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T13:34:54.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:54.007695+0000 mon.vm06 (mon.0) 538 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T13:34:54.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:54.007729+0000 mon.vm06 (mon.0) 539 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T13:34:54.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:54.007729+0000 mon.vm06 (mon.0) 539 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T13:34:54.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:54.008018+0000 mon.vm06 (mon.0) 540 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T13:34:54.361 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:54 vm09 bash[34466]: audit 2026-04-15T13:34:54.008018+0000 mon.vm06 (mon.0) 540 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: cluster 2026-04-15T13:34:52.336619+0000 osd.0 (osd.0) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: cluster 2026-04-15T13:34:52.336619+0000 osd.0 (osd.0) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: cluster 2026-04-15T13:34:52.338456+0000 osd.0 (osd.0) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: cluster 2026-04-15T13:34:52.338456+0000 osd.0 (osd.0) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: cluster 2026-04-15T13:34:52.946097+0000 
osd.1 (osd.1) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: cluster 2026-04-15T13:34:52.946097+0000 osd.1 (osd.1) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: cluster 2026-04-15T13:34:52.946110+0000 osd.1 (osd.1) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: cluster 2026-04-15T13:34:52.946110+0000 osd.1 (osd.1) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:54.016004+0000 mon.vm06 (mon.0) 541 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:54.016004+0000 mon.vm06 (mon.0) 541 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:54.016611+0000 mon.vm09 (mon.1) 17 : audit [DBG] from='client.? 192.168.123.106:0/1927194477' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:54.016611+0000 mon.vm09 (mon.1) 17 : audit [DBG] from='client.? 192.168.123.106:0/1927194477' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: cluster 2026-04-15T13:34:54.169049+0000 mgr.vm06.qbbldl (mgr.14229) 82 : cluster [DBG] pgmap v27: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: cluster 2026-04-15T13:34:54.169049+0000 mgr.vm06.qbbldl (mgr.14229) 82 : cluster [DBG] pgmap v27: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:54.581563+0000 mon.vm06 (mon.0) 542 : audit [INF] from='osd.2 ' entity='osd.2' 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:54.581563+0000 mon.vm06 (mon.0) 542 : audit [INF] from='osd.2 ' entity='osd.2' 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:54.665827+0000 mon.vm09 (mon.1) 18 : audit [INF] from='osd.4 [v2:192.168.123.109:6816/489839388,v1:192.168.123.109:6817/489839388]' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:54.665827+0000 mon.vm09 (mon.1) 18 : audit [INF] from='osd.4 [v2:192.168.123.109:6816/489839388,v1:192.168.123.109:6817/489839388]' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 
2026-04-15T13:34:54.668008+0000 mon.vm06 (mon.0) 543 : audit [INF] from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:54.668008+0000 mon.vm06 (mon.0) 543 : audit [INF] from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.010063+0000 mon.vm06 (mon.0) 544 : audit [INF] from='osd.3 [v2:192.168.123.106:6810/2627160607,v1:192.168.123.106:6811/2627160607]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.010063+0000 mon.vm06 (mon.0) 544 : audit [INF] from='osd.3 [v2:192.168.123.106:6810/2627160607,v1:192.168.123.106:6811/2627160607]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.010141+0000 mon.vm06 (mon.0) 545 : audit [INF] from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]}]': finished 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.010141+0000 mon.vm06 (mon.0) 545 : audit [INF] from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]}]': finished 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: cluster 2026-04-15T13:34:55.012879+0000 mon.vm06 (mon.0) 546 : cluster [INF] osd.2 [v2:192.168.123.109:6808/1579250882,v1:192.168.123.109:6809/1579250882] boot 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: cluster 2026-04-15T13:34:55.012879+0000 mon.vm06 (mon.0) 546 : cluster [INF] osd.2 [v2:192.168.123.109:6808/1579250882,v1:192.168.123.109:6809/1579250882] boot 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: cluster 2026-04-15T13:34:55.012908+0000 mon.vm06 (mon.0) 547 : cluster [DBG] osdmap e17: 8 total, 3 up, 8 in 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: cluster 2026-04-15T13:34:55.012908+0000 mon.vm06 (mon.0) 547 : cluster [DBG] osdmap e17: 8 total, 3 up, 8 in 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.013520+0000 mon.vm06 (mon.0) 548 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.013520+0000 mon.vm06 (mon.0) 548 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.013613+0000 mon.vm06 (mon.0) 549 : audit [DBG] 
from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.013613+0000 mon.vm06 (mon.0) 549 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.013662+0000 mon.vm06 (mon.0) 550 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.013662+0000 mon.vm06 (mon.0) 550 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.013706+0000 mon.vm06 (mon.0) 551 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.013706+0000 mon.vm06 (mon.0) 551 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.013755+0000 mon.vm06 (mon.0) 552 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.013755+0000 mon.vm06 (mon.0) 552 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.013812+0000 mon.vm06 (mon.0) 553 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.013812+0000 mon.vm06 (mon.0) 553 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.017956+0000 mon.vm06 (mon.0) 554 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.017956+0000 mon.vm06 (mon.0) 554 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.017986+0000 mon.vm09 (mon.1) 19 : audit [INF] from='osd.4 
[v2:192.168.123.109:6816/489839388,v1:192.168.123.109:6817/489839388]' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.017986+0000 mon.vm09 (mon.1) 19 : audit [INF] from='osd.4 [v2:192.168.123.109:6816/489839388,v1:192.168.123.109:6817/489839388]' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.020123+0000 mon.vm06 (mon.0) 555 : audit [INF] from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]} : dispatch 2026-04-15T13:34:55.144 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:55 vm09 bash[34466]: audit 2026-04-15T13:34:55.020123+0000 mon.vm06 (mon.0) 555 : audit [INF] from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]} : dispatch 2026-04-15T13:34:55.226 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd stat -f json 2026-04-15T13:34:55.500 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: cluster 2026-04-15T13:34:52.336619+0000 osd.0 (osd.0) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-15T13:34:55.500 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: cluster 2026-04-15T13:34:52.336619+0000 osd.0 (osd.0) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-15T13:34:55.500 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: cluster 2026-04-15T13:34:52.338456+0000 osd.0 (osd.0) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-15T13:34:55.500 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: cluster 2026-04-15T13:34:52.338456+0000 osd.0 (osd.0) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-15T13:34:55.500 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: cluster 2026-04-15T13:34:52.946097+0000 osd.1 (osd.1) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: cluster 2026-04-15T13:34:52.946097+0000 osd.1 (osd.1) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: cluster 2026-04-15T13:34:52.946110+0000 osd.1 (osd.1) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: cluster 2026-04-15T13:34:52.946110+0000 osd.1 (osd.1) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: audit 2026-04-15T13:34:54.016004+0000 mon.vm06 (mon.0) 541 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch 2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: audit 
2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: audit 2026-04-15T13:34:54.016611+0000 mon.vm09 (mon.1) 17 : audit [DBG] from='client.? 192.168.123.106:0/1927194477' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: cluster 2026-04-15T13:34:54.169049+0000 mgr.vm06.qbbldl (mgr.14229) 82 : cluster [DBG] pgmap v27: 0 pgs: ; 0 B data, 0 B used, 0 B / 0 B avail
2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: audit 2026-04-15T13:34:54.581563+0000 mon.vm06 (mon.0) 542 : audit [INF] from='osd.2 ' entity='osd.2'
2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: audit 2026-04-15T13:34:54.665827+0000 mon.vm09 (mon.1) 18 : audit [INF] from='osd.4 [v2:192.168.123.109:6816/489839388,v1:192.168.123.109:6817/489839388]' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]} : dispatch
2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: audit 2026-04-15T13:34:54.668008+0000 mon.vm06 (mon.0) 543 : audit [INF] from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]} : dispatch
2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: audit 2026-04-15T13:34:55.010063+0000 mon.vm06 (mon.0) 544 : audit [INF] from='osd.3 [v2:192.168.123.106:6810/2627160607,v1:192.168.123.106:6811/2627160607]' entity='osd.3' cmd='[{"prefix": "osd crush create-or-move", "id": 3, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished
2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: audit 2026-04-15T13:34:55.010141+0000 mon.vm06 (mon.0) 545 : audit [INF] from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["4"]}]': finished
2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: cluster 2026-04-15T13:34:55.012879+0000 mon.vm06 (mon.0) 546 : cluster [INF] osd.2 [v2:192.168.123.109:6808/1579250882,v1:192.168.123.109:6809/1579250882] boot
2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: cluster 2026-04-15T13:34:55.012908+0000 mon.vm06 (mon.0) 547 : cluster [DBG] osdmap e17: 8 total, 3 up, 8 in
2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: audit 2026-04-15T13:34:55.013520+0000 mon.vm06 (mon.0) 548 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 2} : dispatch
2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: audit 2026-04-15T13:34:55.013613+0000 mon.vm06 (mon.0) 549 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: audit 2026-04-15T13:34:55.013662+0000 mon.vm06 (mon.0) 550 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: audit 2026-04-15T13:34:55.013706+0000 mon.vm06 (mon.0) 551 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: audit 2026-04-15T13:34:55.013755+0000 mon.vm06 (mon.0) 552 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: audit 2026-04-15T13:34:55.013812+0000 mon.vm06 (mon.0) 553 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: audit 2026-04-15T13:34:55.017956+0000 mon.vm06 (mon.0) 554 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: audit 2026-04-15T13:34:55.017986+0000 mon.vm09 (mon.1) 19 : audit [INF] from='osd.4 [v2:192.168.123.109:6816/489839388,v1:192.168.123.109:6817/489839388]' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]} : dispatch
2026-04-15T13:34:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:55 vm06 bash[28114]: audit 2026-04-15T13:34:55.020123+0000 mon.vm06 (mon.0) 555 : audit [INF] from='osd.4 ' entity='osd.4' cmd={"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]} : dispatch
2026-04-15T13:34:55.526 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:34:55.919 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:34:56.000 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":17,"num_osds":8,"num_up_osds":3,"osd_up_since":1776260095,"num_in_osds":8,"osd_in_since":1776260083,"num_remapped_pgs":0}
2026-04-15T13:34:56.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: cluster 2026-04-15T13:34:53.702945+0000 osd.2 (osd.2) 1 : cluster [DBG] purged_snaps scrub starts
2026-04-15T13:34:56.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: cluster 2026-04-15T13:34:53.702955+0000 osd.2 (osd.2) 2 : cluster [DBG] purged_snaps scrub ok
2026-04-15T13:34:56.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: audit 2026-04-15T13:34:55.445239+0000 mon.vm06 (mon.0) 556 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:56.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: audit 2026-04-15T13:34:55.475770+0000 mon.vm06 (mon.0) 557 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:56.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: audit 2026-04-15T13:34:55.536120+0000 mon.vm06 (mon.0) 558 : audit [INF] from='osd.5 [v2:192.168.123.106:6818/3054582361,v1:192.168.123.106:6819/3054582361]' entity='osd.5' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["5"]} : dispatch
2026-04-15T13:34:56.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: audit 2026-04-15T13:34:55.591816+0000 mon.vm06 (mon.0) 559 : audit [INF] from='osd.3 [v2:192.168.123.106:6810/2627160607,v1:192.168.123.106:6811/2627160607]' entity='osd.3'
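The `ceph osd stat -f json` output above ({"epoch":17,"num_osds":8,"num_up_osds":3,...}) carries the counters the run is evidently watching while the OSDs come up: num_up_osds climbs toward num_osds as each daemon boots (osdmap e16: 2 up, e17: 3 up). A minimal sketch of an equivalent manual check (hypothetical, using jq; not a command issued by this run):

    # Re-run the same query through cephadm shell; jq -e exits 0 only
    # once every OSD in the map reports up.
    sudo /home/ubuntu/cephtest/cephadm shell -- ceph osd stat -f json \
      | jq -e '.num_up_osds == .num_osds'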
2026-04-15T13:34:56.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: audit 2026-04-15T13:34:55.920053+0000 mon.vm06 (mon.0) 560 : audit [DBG] from='client.? 192.168.123.106:0/2120860016' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-15T13:34:56.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: audit 2026-04-15T13:34:56.012663+0000 mon.vm06 (mon.0) 561 : audit [INF] from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished
2026-04-15T13:34:56.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: audit 2026-04-15T13:34:56.012736+0000 mon.vm06 (mon.0) 562 : audit [INF] from='osd.5 [v2:192.168.123.106:6818/3054582361,v1:192.168.123.106:6819/3054582361]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["5"]}]': finished
2026-04-15T13:34:56.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: cluster 2026-04-15T13:34:56.014892+0000 mon.vm06 (mon.0) 563 : cluster [INF] osd.3 [v2:192.168.123.106:6810/2627160607,v1:192.168.123.106:6811/2627160607] boot
2026-04-15T13:34:56.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: cluster 2026-04-15T13:34:56.014928+0000 mon.vm06 (mon.0) 564 : cluster [DBG] osdmap e18: 8 total, 4 up, 8 in
2026-04-15T13:34:56.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: audit 2026-04-15T13:34:56.015081+0000 mon.vm06 (mon.0) 565 : audit [INF] from='osd.5 [v2:192.168.123.106:6818/3054582361,v1:192.168.123.106:6819/3054582361]' entity='osd.5' cmd={"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]} : dispatch
2026-04-15T13:34:56.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: audit 2026-04-15T13:34:56.015176+0000 mon.vm06 (mon.0) 566 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-15T13:34:56.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: audit 2026-04-15T13:34:56.015264+0000 mon.vm06 (mon.0) 567 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-15T13:34:56.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: audit 2026-04-15T13:34:56.015309+0000 mon.vm06 (mon.0) 568 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-15T13:34:56.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: audit 2026-04-15T13:34:56.015364+0000 mon.vm06 (mon.0) 569 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:56.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: audit 2026-04-15T13:34:56.015409+0000 mon.vm06 (mon.0) 570 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-15T13:34:56.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: audit 2026-04-15T13:34:56.018657+0000 mon.vm06 (mon.0) 571 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-15T13:34:56.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: audit 2026-04-15T13:34:56.152395+0000 mon.vm09 (mon.1) 20 : audit [INF] from='osd.6 [v2:192.168.123.109:6824/2064572425,v1:192.168.123.109:6825/2064572425]' entity='osd.6' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["6"]} : dispatch
2026-04-15T13:34:56.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: audit 2026-04-15T13:34:56.154458+0000 mon.vm06 (mon.0) 572 : audit [INF] from='osd.6 ' entity='osd.6' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["6"]} : dispatch
2026-04-15T13:34:56.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: cluster 2026-04-15T13:34:56.169218+0000 mgr.vm06.qbbldl (mgr.14229) 83 : cluster [DBG] pgmap v30: 0 pgs: ; 0 B data, 79 MiB used, 60 GiB / 60 GiB avail
2026-04-15T13:34:56.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: audit 2026-04-15T13:34:56.387504+0000 mon.vm06 (mon.0) 573 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:56.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: audit 2026-04-15T13:34:56.392159+0000 mon.vm06 (mon.0) 574 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:56.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:56 vm06 bash[28114]: audit 2026-04-15T13:34:56.404248+0000 mon.vm06 (mon.0) 575 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:34:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: cluster 2026-04-15T13:34:53.702945+0000 osd.2 (osd.2) 1 : cluster [DBG] purged_snaps scrub starts
2026-04-15T13:34:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: cluster 2026-04-15T13:34:53.702955+0000 osd.2 (osd.2) 2 : cluster [DBG] purged_snaps scrub ok
2026-04-15T13:34:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: audit 2026-04-15T13:34:55.445239+0000 mon.vm06 (mon.0) 556 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: audit 2026-04-15T13:34:55.475770+0000 mon.vm06 (mon.0) 557 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: audit 2026-04-15T13:34:55.536120+0000 mon.vm06 (mon.0) 558 : audit [INF] from='osd.5 [v2:192.168.123.106:6818/3054582361,v1:192.168.123.106:6819/3054582361]' entity='osd.5' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["5"]} : dispatch
2026-04-15T13:34:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: audit 2026-04-15T13:34:55.591816+0000 mon.vm06 (mon.0) 559 : audit [INF] from='osd.3 [v2:192.168.123.106:6810/2627160607,v1:192.168.123.106:6811/2627160607]' entity='osd.3'
2026-04-15T13:34:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: audit 2026-04-15T13:34:55.920053+0000 mon.vm06 (mon.0) 560 : audit [DBG] from='client.? 192.168.123.106:0/2120860016' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-15T13:34:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: audit 2026-04-15T13:34:56.012663+0000 mon.vm06 (mon.0) 561 : audit [INF] from='osd.4 ' entity='osd.4' cmd='[{"prefix": "osd crush create-or-move", "id": 4, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished
2026-04-15T13:34:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: audit 2026-04-15T13:34:56.012736+0000 mon.vm06 (mon.0) 562 : audit [INF] from='osd.5 [v2:192.168.123.106:6818/3054582361,v1:192.168.123.106:6819/3054582361]' entity='osd.5' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["5"]}]': finished
2026-04-15T13:34:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: cluster 2026-04-15T13:34:56.014892+0000 mon.vm06 (mon.0) 563 : cluster [INF] osd.3 [v2:192.168.123.106:6810/2627160607,v1:192.168.123.106:6811/2627160607] boot
2026-04-15T13:34:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: cluster 2026-04-15T13:34:56.014928+0000 mon.vm06 (mon.0) 564 : cluster [DBG] osdmap e18: 8 total, 4 up, 8 in
2026-04-15T13:34:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: audit 2026-04-15T13:34:56.015081+0000 mon.vm06 (mon.0) 565 : audit [INF] from='osd.5 [v2:192.168.123.106:6818/3054582361,v1:192.168.123.106:6819/3054582361]' entity='osd.5' cmd={"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]} : dispatch
2026-04-15T13:34:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: audit 2026-04-15T13:34:56.015176+0000 mon.vm06 (mon.0) 566 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 3} : dispatch
2026-04-15T13:34:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: audit 2026-04-15T13:34:56.015264+0000 mon.vm06 (mon.0) 567 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-15T13:34:56.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: audit 2026-04-15T13:34:56.015309+0000 mon.vm06 (mon.0) 568 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-15T13:34:56.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: audit 2026-04-15T13:34:56.015364+0000 mon.vm06 (mon.0) 569 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:56.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: audit 2026-04-15T13:34:56.015409+0000 mon.vm06 (mon.0) 570 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-15T13:34:56.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: audit 2026-04-15T13:34:56.018657+0000 mon.vm06 (mon.0) 571 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-15T13:34:56.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: audit 2026-04-15T13:34:56.152395+0000 mon.vm09 (mon.1) 20 : audit [INF] from='osd.6 [v2:192.168.123.109:6824/2064572425,v1:192.168.123.109:6825/2064572425]' entity='osd.6' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["6"]} : dispatch
2026-04-15T13:34:56.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: audit 2026-04-15T13:34:56.154458+0000 mon.vm06 (mon.0) 572 : audit [INF] from='osd.6 ' entity='osd.6' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["6"]} : dispatch
2026-04-15T13:34:56.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: cluster 2026-04-15T13:34:56.169218+0000 mgr.vm06.qbbldl (mgr.14229) 83 : cluster [DBG] pgmap v30: 0 pgs: ; 0 B data, 79 MiB used, 60 GiB / 60 GiB avail
2026-04-15T13:34:56.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: audit 2026-04-15T13:34:56.387504+0000 mon.vm06 (mon.0) 573 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:56.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: audit 2026-04-15T13:34:56.392159+0000 mon.vm06 (mon.0) 574 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:34:56.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:56 vm09 bash[34466]: audit 2026-04-15T13:34:56.404248+0000 mon.vm06 (mon.0) 575 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:34:57.001 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd stat -f json
2026-04-15T13:34:57.257 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:34:57.564 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: cluster 2026-04-15T13:34:54.530198+0000 osd.3 (osd.3) 1 : cluster [DBG] purged_snaps scrub starts
2026-04-15T13:34:57.564 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: cluster 2026-04-15T13:34:54.530208+0000 osd.3 (osd.3) 2 : cluster [DBG] purged_snaps scrub ok
2026-04-15T13:34:57.564 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: audit 2026-04-15T13:34:56.451758+0000 mon.vm06 (mon.0) 576 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true} : dispatch
2026-04-15T13:34:57.564 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: audit 2026-04-15T13:34:56.868786+0000 mon.vm09 (mon.1) 21 : audit [INF] from='osd.7 [v2:192.168.123.106:6826/1155449015,v1:192.168.123.106:6827/1155449015]' entity='osd.7' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["7"]} : dispatch
2026-04-15T13:34:57.564 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: audit 2026-04-15T13:34:56.870847+0000 mon.vm06 (mon.0) 577 : audit [INF] from='osd.7 ' entity='osd.7' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["7"]} : dispatch
2026-04-15T13:34:57.564 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: audit 2026-04-15T13:34:57.016641+0000 mon.vm06 (mon.0) 578 : audit [INF] from='osd.5 [v2:192.168.123.106:6818/3054582361,v1:192.168.123.106:6819/3054582361]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished
2026-04-15T13:34:57.564 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: audit 2026-04-15T13:34:57.016676+0000 mon.vm06 (mon.0) 579 : audit [INF] from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["6"]}]': finished
2026-04-15T13:34:57.564 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: audit 2026-04-15T13:34:57.016716+0000 mon.vm06 (mon.0) 580 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished
2026-04-15T13:34:57.564 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: audit 2026-04-15T13:34:57.016745+0000 mon.vm06 (mon.0) 581 : audit [INF] from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["7"]}]': finished
2026-04-15T13:34:57.564 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: audit 2026-04-15T13:34:57.018568+0000 mon.vm09 (mon.1) 22 : audit [INF] from='osd.6 [v2:192.168.123.109:6824/2064572425,v1:192.168.123.109:6825/2064572425]' entity='osd.6' cmd={"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]} : dispatch
2026-04-15T13:34:57.564 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: cluster 2026-04-15T13:34:57.019658+0000 mon.vm06 (mon.0) 582 : cluster [INF] osd.4 [v2:192.168.123.109:6816/489839388,v1:192.168.123.109:6817/489839388] boot
2026-04-15T13:34:57.564 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: cluster 2026-04-15T13:34:57.019691+0000 mon.vm06 (mon.0) 583 : cluster [DBG] osdmap e19: 8 total, 5 up, 8 in
2026-04-15T13:34:57.564 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: audit 2026-04-15T13:34:57.021332+0000 mon.vm06 (mon.0) 584 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-15T13:34:57.564 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: audit 2026-04-15T13:34:57.021489+0000 mon.vm06 (mon.0) 585 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-15T13:34:57.565 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: audit 2026-04-15T13:34:57.021563+0000 mon.vm06 (mon.0) 586 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:57.565 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: audit 2026-04-15T13:34:57.021620+0000 mon.vm06 (mon.0) 587 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-15T13:34:57.565 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: audit 2026-04-15T13:34:57.021760+0000 mon.vm06 (mon.0) 588 : audit [INF] from='osd.6 ' entity='osd.6' cmd={"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]} : dispatch
2026-04-15T13:34:57.565 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: audit 2026-04-15T13:34:57.022924+0000 mon.vm09 (mon.1) 23 : audit [INF] from='osd.7 [v2:192.168.123.106:6826/1155449015,v1:192.168.123.106:6827/1155449015]' entity='osd.7' cmd={"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]} : dispatch
2026-04-15T13:34:57.565 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: audit 2026-04-15T13:34:57.023814+0000 mon.vm06 (mon.0) 589 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-15T13:34:57.565 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: audit 2026-04-15T13:34:57.023907+0000 mon.vm06 (mon.0) 590 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true} : dispatch
2026-04-15T13:34:57.565 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:57 vm06 bash[28114]: audit 2026-04-15T13:34:57.030726+0000 mon.vm06 (mon.0) 591 : audit [INF] from='osd.7 ' entity='osd.7' cmd={"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]} : dispatch
2026-04-15T13:34:57.702 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:34:57.793 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":19,"num_osds":8,"num_up_osds":5,"osd_up_since":1776260097,"num_in_osds":8,"osd_in_since":1776260083,"num_remapped_pgs":0}
2026-04-15T13:34:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: cluster 2026-04-15T13:34:54.530198+0000 osd.3 (osd.3) 1 : cluster [DBG] purged_snaps scrub starts
2026-04-15T13:34:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: cluster 2026-04-15T13:34:54.530208+0000 osd.3 (osd.3) 2 : cluster [DBG] purged_snaps scrub ok
2026-04-15T13:34:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: audit 2026-04-15T13:34:56.451758+0000 mon.vm06 (mon.0) 576 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true} : dispatch
2026-04-15T13:34:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: audit 2026-04-15T13:34:56.868786+0000 mon.vm09 (mon.1) 21 : audit [INF] from='osd.7 [v2:192.168.123.106:6826/1155449015,v1:192.168.123.106:6827/1155449015]' entity='osd.7' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["7"]} : dispatch
2026-04-15T13:34:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: audit 2026-04-15T13:34:56.870847+0000 mon.vm06 (mon.0) 577 : audit [INF] from='osd.7 ' entity='osd.7' cmd={"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["7"]} : dispatch
2026-04-15T13:34:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: audit 2026-04-15T13:34:57.016641+0000 mon.vm06 (mon.0) 578 : audit [INF] from='osd.5 [v2:192.168.123.106:6818/3054582361,v1:192.168.123.106:6819/3054582361]' entity='osd.5' cmd='[{"prefix": "osd crush create-or-move", "id": 5, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished
2026-04-15T13:34:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: audit 2026-04-15T13:34:57.016676+0000 mon.vm06 (mon.0) 579 : audit [INF] from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["6"]}]': finished
2026-04-15T13:34:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: audit 2026-04-15T13:34:57.016716+0000 mon.vm06 (mon.0) 580 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "osd pool create", "format": "json", "pool": ".mgr", "pg_num": 1, "pg_num_min": 1, "pg_num_max": 32, "yes_i_really_mean_it": true}]': finished
2026-04-15T13:34:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: audit 2026-04-15T13:34:57.016745+0000 mon.vm06 (mon.0) 581 : audit [INF] from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush set-device-class", "class": "ssd", "ids": ["7"]}]': finished
2026-04-15T13:34:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: audit 2026-04-15T13:34:57.018568+0000 mon.vm09 (mon.1) 22 : audit [INF] from='osd.6 [v2:192.168.123.109:6824/2064572425,v1:192.168.123.109:6825/2064572425]' entity='osd.6' cmd={"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]} : dispatch
2026-04-15T13:34:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: cluster 2026-04-15T13:34:57.019658+0000 mon.vm06 (mon.0) 582 : cluster [INF] osd.4 [v2:192.168.123.109:6816/489839388,v1:192.168.123.109:6817/489839388] boot
2026-04-15T13:34:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: cluster 2026-04-15T13:34:57.019691+0000 mon.vm06 (mon.0) 583 : cluster [DBG] osdmap e19: 8 total, 5 up, 8 in
2026-04-15T13:34:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: audit 2026-04-15T13:34:57.021332+0000 mon.vm06 (mon.0) 584 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 4} : dispatch
2026-04-15T13:34:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: audit 2026-04-15T13:34:57.021489+0000 mon.vm06 (mon.0) 585 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-15T13:34:57.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: audit 2026-04-15T13:34:57.021563+0000 mon.vm06 (mon.0) 586 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:57.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: audit 2026-04-15T13:34:57.021620+0000 mon.vm06 (mon.0) 587 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-15T13:34:57.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: audit 2026-04-15T13:34:57.021760+0000 mon.vm06 (mon.0) 588 : audit [INF] from='osd.6 ' entity='osd.6' cmd={"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]} : dispatch
2026-04-15T13:34:57.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: audit 2026-04-15T13:34:57.022924+0000 mon.vm09 (mon.1) 23 : audit [INF] from='osd.7 [v2:192.168.123.106:6826/1155449015,v1:192.168.123.106:6827/1155449015]' entity='osd.7' cmd={"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]} : dispatch
2026-04-15T13:34:57.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: audit 2026-04-15T13:34:57.023814+0000 mon.vm06 (mon.0) 589 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-15T13:34:57.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: audit 2026-04-15T13:34:57.023907+0000 mon.vm06 (mon.0) 590 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true} : dispatch
2026-04-15T13:34:57.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:57 vm09 bash[34466]: audit 2026-04-15T13:34:57.030726+0000 mon.vm06 (mon.0) 591 : audit [INF] from='osd.7 ' entity='osd.7' cmd={"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]} : dispatch
2026-04-15T13:34:58.793 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd stat -f json
2026-04-15T13:34:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:58 vm09 bash[34466]: cluster 2026-04-15T13:34:55.672141+0000 osd.4 (osd.4) 1 : cluster [DBG] purged_snaps scrub starts
2026-04-15T13:34:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:58 vm09 bash[34466]: cluster 2026-04-15T13:34:55.672153+0000 osd.4 (osd.4) 2 : cluster [DBG] purged_snaps scrub ok
2026-04-15T13:34:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:58 vm09 bash[34466]: cluster 2026-04-15T13:34:56.522380+0000 osd.5 (osd.5) 1 : cluster [DBG] purged_snaps scrub starts
2026-04-15T13:34:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:58 vm09 bash[34466]: cluster 2026-04-15T13:34:56.522390+0000 osd.5 (osd.5) 2 : cluster [DBG] purged_snaps scrub ok
2026-04-15T13:34:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:58 vm09 bash[34466]: audit 2026-04-15T13:34:57.569353+0000 mon.vm06 (mon.0) 592 : audit [INF] from='osd.5 [v2:192.168.123.106:6818/3054582361,v1:192.168.123.106:6819/3054582361]' entity='osd.5'
2026-04-15T13:34:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:58 vm09 bash[34466]: audit 2026-04-15T13:34:57.702940+0000 mon.vm06 (mon.0) 593 : audit [DBG] from='client.? 192.168.123.106:0/3087759220' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-15T13:34:58.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:58 vm09 bash[34466]: audit 2026-04-15T13:34:58.021511+0000 mon.vm06 (mon.0) 594 : audit [INF] from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished
2026-04-15T13:34:58.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:58 vm09 bash[34466]: audit 2026-04-15T13:34:58.021570+0000 mon.vm06 (mon.0) 595 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished
2026-04-15T13:34:58.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:58 vm09 bash[34466]: audit 2026-04-15T13:34:58.021609+0000 mon.vm06 (mon.0) 596 : audit [INF] from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished
2026-04-15T13:34:58.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:58 vm09 bash[34466]: cluster 2026-04-15T13:34:58.023801+0000 mon.vm06 (mon.0) 597 : cluster [INF] osd.5 [v2:192.168.123.106:6818/3054582361,v1:192.168.123.106:6819/3054582361] boot
2026-04-15T13:34:58.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:58 vm09 bash[34466]: cluster 2026-04-15T13:34:58.023842+0000 mon.vm06 (mon.0) 598 : cluster [DBG] osdmap e20: 8 total, 6 up, 8 in
2026-04-15T13:34:58.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:58 vm09 bash[34466]: audit 2026-04-15T13:34:58.024069+0000 mon.vm06 (mon.0) 599 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-15T13:34:58.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:58 vm09 bash[34466]: audit 2026-04-15T13:34:58.024138+0000 mon.vm06 (mon.0) 600 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:58.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:58 vm09 bash[34466]: audit 2026-04-15T13:34:58.024172+0000 mon.vm06 (mon.0) 601 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-15T13:34:58.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:58 vm09 bash[34466]: audit 2026-04-15T13:34:58.030074+0000 mon.vm06 (mon.0) 602 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:58.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:58 vm09 bash[34466]: audit 2026-04-15T13:34:58.032338+0000 mon.vm06 (mon.0) 603 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-15T13:34:58.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:58 vm09 bash[34466]: cluster 2026-04-15T13:34:58.169436+0000 mgr.vm06.qbbldl (mgr.14229) 84 : cluster [DBG] pgmap v33: 1 pgs: 1 unknown; 0 B data, 559 MiB used, 119 GiB / 120 GiB avail
2026-04-15T13:34:58.883 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:58 vm06 bash[28114]: cluster 2026-04-15T13:34:55.672141+0000 osd.4 (osd.4) 1 : cluster [DBG] purged_snaps scrub starts
2026-04-15T13:34:58.883 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:58 vm06 bash[28114]: cluster 2026-04-15T13:34:55.672153+0000 osd.4 (osd.4) 2 : cluster [DBG] purged_snaps scrub ok
2026-04-15T13:34:58.883 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:58 vm06 bash[28114]: cluster 2026-04-15T13:34:56.522380+0000 osd.5 (osd.5) 1 : cluster [DBG] purged_snaps scrub starts
2026-04-15T13:34:58.883 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:58 vm06 bash[28114]: cluster 2026-04-15T13:34:56.522390+0000 osd.5 (osd.5) 2 : cluster [DBG] purged_snaps scrub ok
2026-04-15T13:34:58.883 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:58 vm06 bash[28114]: audit 2026-04-15T13:34:57.569353+0000 mon.vm06 (mon.0) 592 : audit [INF] from='osd.5 [v2:192.168.123.106:6818/3054582361,v1:192.168.123.106:6819/3054582361]' entity='osd.5'
2026-04-15T13:34:58.883 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:58 vm06 bash[28114]: audit 2026-04-15T13:34:57.702940+0000 mon.vm06 (mon.0) 593 : audit [DBG] from='client.? 192.168.123.106:0/3087759220' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-15T13:34:58.883 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:58 vm06 bash[28114]: audit 2026-04-15T13:34:58.021511+0000 mon.vm06 (mon.0) 594 : audit [INF] from='osd.6 ' entity='osd.6' cmd='[{"prefix": "osd crush create-or-move", "id": 6, "weight":0.0195, "args": ["host=vm09", "root=default"]}]': finished
2026-04-15T13:34:58.883 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:58 vm06 bash[28114]: audit 2026-04-15T13:34:58.021570+0000 mon.vm06 (mon.0) 595 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "osd pool application enable", "format": "json", "pool": ".mgr", "app": "mgr", "yes_i_really_mean_it": true}]': finished
2026-04-15T13:34:58.883 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:58 vm06 bash[28114]: audit 2026-04-15T13:34:58.021609+0000 mon.vm06 (mon.0) 596 : audit [INF] from='osd.7 ' entity='osd.7' cmd='[{"prefix": "osd crush create-or-move", "id": 7, "weight":0.0195, "args": ["host=vm06", "root=default"]}]': finished
2026-04-15T13:34:58.883 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:58 vm06 bash[28114]: cluster 2026-04-15T13:34:58.023801+0000 mon.vm06 (mon.0) 597 : cluster [INF] osd.5 [v2:192.168.123.106:6818/3054582361,v1:192.168.123.106:6819/3054582361] boot
2026-04-15T13:34:58.883 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:58 vm06 bash[28114]: cluster 2026-04-15T13:34:58.023842+0000 mon.vm06 (mon.0) 598 : cluster [DBG] osdmap e20: 8 total, 6 up, 8 in
2026-04-15T13:34:58.883 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:58 vm06 bash[28114]: audit 2026-04-15T13:34:58.024069+0000 mon.vm06 (mon.0) 599 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 5} : dispatch
2026-04-15T13:34:58.883 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:58 vm06 bash[28114]: audit 2026-04-15T13:34:58.024138+0000 mon.vm06 (mon.0) 600 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:58.884 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:58 vm06 bash[28114]: audit 2026-04-15T13:34:58.024172+0000 mon.vm06 (mon.0) 601 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-15T13:34:58.884 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:58 vm06 bash[28114]: audit 2026-04-15T13:34:58.030074+0000 mon.vm06 (mon.0) 602 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:58.884 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:58 vm06 bash[28114]: audit 2026-04-15T13:34:58.032338+0000 mon.vm06 (mon.0) 603 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-15T13:34:58.884 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:58 vm06 bash[28114]: cluster 2026-04-15T13:34:58.169436+0000 mgr.vm06.qbbldl (mgr.14229) 84 : cluster [DBG] pgmap v33: 1 pgs: 1 unknown; 0 B data, 559 MiB used, 119 GiB / 120 GiB avail
2026-04-15T13:34:59.060 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:34:59.421 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:34:59.479 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":21,"num_osds":8,"num_up_osds":8,"osd_up_since":1776260099,"num_in_osds":8,"osd_in_since":1776260083,"num_remapped_pgs":1}
2026-04-15T13:34:59.480 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd dump --format=json
2026-04-15T13:34:59.730 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:34:59.748 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:59 vm06 bash[28114]: cluster 2026-04-15T13:34:57.133899+0000 osd.6 (osd.6) 1 : cluster [DBG] purged_snaps scrub starts
2026-04-15T13:34:59.748 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:59 vm06 bash[28114]: cluster 2026-04-15T13:34:57.133915+0000 osd.6 (osd.6) 2 : cluster [DBG] purged_snaps scrub ok
2026-04-15T13:34:59.748 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:59 vm06 bash[28114]: audit 2026-04-15T13:34:58.587829+0000 mon.vm06 (mon.0) 604 : audit [INF] from='osd.7 ' entity='osd.7'
2026-04-15T13:34:59.748 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:59 vm06 bash[28114]: audit 2026-04-15T13:34:59.027124+0000 mon.vm06 (mon.0) 605 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:59.748 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:59 vm06 bash[28114]: cluster 2026-04-15T13:34:59.029936+0000 mon.vm06 (mon.0) 606 : cluster [INF] osd.6 [v2:192.168.123.109:6824/2064572425,v1:192.168.123.109:6825/2064572425] boot
2026-04-15T13:34:59.748 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:59 vm06 bash[28114]: cluster 2026-04-15T13:34:59.029993+0000 mon.vm06 (mon.0) 607 : cluster [INF] osd.7 [v2:192.168.123.106:6826/1155449015,v1:192.168.123.106:6827/1155449015] boot
2026-04-15T13:34:59.748 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:59 vm06 bash[28114]: cluster 2026-04-15T13:34:59.030012+0000 mon.vm06 (mon.0) 608 : cluster [DBG] osdmap e21: 8 total, 8 up, 8 in
2026-04-15T13:34:59.748 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:59 vm06 bash[28114]: audit 2026-04-15T13:34:59.030099+0000 mon.vm06 (mon.0) 609 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:59.748 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:59 vm06 bash[28114]: audit 2026-04-15T13:34:59.030243+0000 mon.vm06 (mon.0) 610 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-15T13:34:59.748 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:34:59 vm06 bash[28114]: audit 2026-04-15T13:34:59.421932+0000 mon.vm06 (mon.0) 611 : audit [DBG] from='client.? 192.168.123.106:0/3331174544' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-15T13:34:59.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:59 vm09 bash[34466]: cluster 2026-04-15T13:34:57.133899+0000 osd.6 (osd.6) 1 : cluster [DBG] purged_snaps scrub starts
2026-04-15T13:34:59.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:59 vm09 bash[34466]: cluster 2026-04-15T13:34:57.133915+0000 osd.6 (osd.6) 2 : cluster [DBG] purged_snaps scrub ok
2026-04-15T13:34:59.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:59 vm09 bash[34466]: audit 2026-04-15T13:34:58.587829+0000 mon.vm06 (mon.0) 604 : audit [INF] from='osd.7 ' entity='osd.7'
2026-04-15T13:34:59.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:59 vm09 bash[34466]: audit 2026-04-15T13:34:59.027124+0000 mon.vm06 (mon.0) 605 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:59.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:59 vm09 bash[34466]: cluster 2026-04-15T13:34:59.029936+0000 mon.vm06 (mon.0) 606 : cluster [INF] osd.6 [v2:192.168.123.109:6824/2064572425,v1:192.168.123.109:6825/2064572425] boot
2026-04-15T13:34:59.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:59 vm09 bash[34466]: cluster 2026-04-15T13:34:59.029993+0000 mon.vm06 (mon.0) 607 : cluster [INF] osd.7 [v2:192.168.123.106:6826/1155449015,v1:192.168.123.106:6827/1155449015] boot
2026-04-15T13:34:59.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:59 vm09 bash[34466]: cluster 2026-04-15T13:34:59.030012+0000 mon.vm06 (mon.0) 608 : cluster [DBG] osdmap e21: 8 total, 8 up, 8 in
2026-04-15T13:34:59.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:59 vm09 bash[34466]: audit 2026-04-15T13:34:59.030099+0000 mon.vm06 (mon.0) 609 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 6} : dispatch
2026-04-15T13:34:59.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:59 vm09 bash[34466]: audit 2026-04-15T13:34:59.030243+0000 mon.vm06 (mon.0) 610 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd metadata", "id": 7} : dispatch
2026-04-15T13:34:59.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:34:59 vm09 bash[34466]: audit 2026-04-15T13:34:59.421932+0000 mon.vm06 (mon.0) 611 : audit [DBG] from='client.? 192.168.123.106:0/3331174544' entity='client.admin' cmd={"prefix": "osd stat", "format": "json"} : dispatch
2026-04-15T13:35:00.072 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:35:00.072 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":22,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","created":"2026-04-15T13:32:40.722525+0000","modified":"2026-04-15T13:35:00.028796+0000","last_up_change":"2026-04-15T13:34:59.022547+0000","last_in_change":"2026-04-15T13:34:43.989611+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":9,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"tentacle","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-04-15T13:34:56.455012+0000","flags":32769,"flags_names":"hashpspool,creating","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"20","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"nonprimary_shards":"{}","options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"84143048-0065-4d27-be49-d79e8113f54d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":21,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6800","nonce":1671722834},{"type":"v1","addr":"192.168.123.109:6801","nonce":1671722834}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6802","nonce":1671722834},{"type":"v1","addr":"192.168.123.109:6803","nonce":1671722834}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6806","nonce":1671722834},{"type":"v1","addr":"192.168.123.109:6807","nonce":1671722834}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6804","nonce":1671722834},{"type":"v1","addr":"192.168.123.109:6805","nonce":1671722834}]},"public_addr":"192.168.123.109:6801/1671722834","cluster_addr":"192.168.123.109:6803/1671722834","heartbeat_back_addr":"192.168.123.109:6807/1671722834","heartbeat_front_addr":"192.168.123.109:6805/1671722834","state":["exists","up"]},{"osd":1,"uuid":"ef108fe4-72aa-4087-a097-576f30c554c6","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":19,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6802","nonce":462013235},{"type":"v1","addr":"192.168.123.106:6803","nonce":462013235}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6804","nonce":462013235},{"type":"v1","addr":"192.168.123.106:6805","nonce":462013235}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6808","nonce":462013235},{"type":"v1","addr":"192.168.123.106:6809","nonce":462013235}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6806","nonce":462013235},{"type":"v1","addr":"192.168.123.106:6807","nonce":462013235}]},"public_addr":"192.168.123.106:6803/462013235","cluster_addr":"192.168.123.106:6805/462013235","heartbeat_back_addr":"192.168.123.106:6809/462013235","heartbeat_front_addr":"192.168.123.106:6807/462013235","state":["exists","up"]},{"osd":2,"uuid":"549138af-e58f-4394-97f4-7a8c79a44f47","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6808","nonce":1579250882},{"type":"v1","addr":"192.168.123.109:6809","nonce":1579250882}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6810","nonce":1579250882},{"type":"v1","addr":"192.168.123.109:6811","nonce":1579250882}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6814","nonce":1579250882},{"type":"v1","addr":"192.168.123.109:6815","nonce":1579250882}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6812","nonce":1579250882},{"type":"v1","addr":"192.168.123.109:6813","nonce":1579250882}]},"public_addr":"192.168.123.109:6809/1579250882","cluster_addr":"192.168.123.109:6811/1579250882","heartbeat_back_addr":"192.168.123.109:6815/1579250882","heartbeat_front_addr":"192.168.123.109:6813/1579250882","state":["exists","up"]},{"osd":3,"uuid":"dd7c0f1b-de8e-46ef-adf1-1743157ee826","up":1,"in":1,"weight":1,"primary_affinity":1,"last_c
lean_begin":0,"last_clean_end":0,"up_from":18,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6810","nonce":2627160607},{"type":"v1","addr":"192.168.123.106:6811","nonce":2627160607}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6812","nonce":2627160607},{"type":"v1","addr":"192.168.123.106:6813","nonce":2627160607}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6816","nonce":2627160607},{"type":"v1","addr":"192.168.123.106:6817","nonce":2627160607}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6814","nonce":2627160607},{"type":"v1","addr":"192.168.123.106:6815","nonce":2627160607}]},"public_addr":"192.168.123.106:6811/2627160607","cluster_addr":"192.168.123.106:6813/2627160607","heartbeat_back_addr":"192.168.123.106:6817/2627160607","heartbeat_front_addr":"192.168.123.106:6815/2627160607","state":["exists","up"]},{"osd":4,"uuid":"9acb1f73-ff77-43b9-839d-40df5a4f00f9","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6816","nonce":489839388},{"type":"v1","addr":"192.168.123.109:6817","nonce":489839388}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6818","nonce":489839388},{"type":"v1","addr":"192.168.123.109:6819","nonce":489839388}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6822","nonce":489839388},{"type":"v1","addr":"192.168.123.109:6823","nonce":489839388}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6820","nonce":489839388},{"type":"v1","addr":"192.168.123.109:6821","nonce":489839388}]},"public_addr":"192.168.123.109:6817/489839388","cluster_addr":"192.168.123.109:6819/489839388","heartbeat_back_addr":"192.168.123.109:6823/489839388","heartbeat_front_addr":"192.168.123.109:6821/489839388","state":["exists","up"]},{"osd":5,"uuid":"0c8f1316-78b2-4ecc-a5cc-29a4a232978f","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":20,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6818","nonce":3054582361},{"type":"v1","addr":"192.168.123.106:6819","nonce":3054582361}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6820","nonce":3054582361},{"type":"v1","addr":"192.168.123.106:6821","nonce":3054582361}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6824","nonce":3054582361},{"type":"v1","addr":"192.168.123.106:6825","nonce":3054582361}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6822","nonce":3054582361},{"type":"v1","addr":"192.168.123.106:6823","nonce":3054582361}]},"public_addr":"192.168.123.106:6819/3054582361","cluster_addr":"192.168.123.106:6821/3054582361","heartbeat_back_addr":"192.168.123.106:6825/3054582361","heartbeat_front_addr":"192.168.123.106:6823/3054582361","state":["exists","up"]},{"osd":6,"uuid":"1eeffe1e-e81f-4304-8758-28d246c7bbc1","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6824","nonce":2064572425},{"type":"v1","addr":"192.168.123.109:6825","nonce":2064572425}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6826","nonce":2064572425},{"type":"v1","addr":"192.168.123.109:6827","nonce"
:2064572425}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6830","nonce":2064572425},{"type":"v1","addr":"192.168.123.109:6831","nonce":2064572425}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6828","nonce":2064572425},{"type":"v1","addr":"192.168.123.109:6829","nonce":2064572425}]},"public_addr":"192.168.123.109:6825/2064572425","cluster_addr":"192.168.123.109:6827/2064572425","heartbeat_back_addr":"192.168.123.109:6831/2064572425","heartbeat_front_addr":"192.168.123.109:6829/2064572425","state":["exists","up"]},{"osd":7,"uuid":"2eefc3e2-2c3a-4ac3-846b-ab224d110bd8","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6826","nonce":1155449015},{"type":"v1","addr":"192.168.123.106:6827","nonce":1155449015}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6828","nonce":1155449015},{"type":"v1","addr":"192.168.123.106:6829","nonce":1155449015}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6832","nonce":1155449015},{"type":"v1","addr":"192.168.123.106:6833","nonce":1155449015}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6830","nonce":1155449015},{"type":"v1","addr":"192.168.123.106:6831","nonce":1155449015}]},"public_addr":"192.168.123.106:6827/1155449015","cluster_addr":"192.168.123.106:6829/1155449015","heartbeat_back_addr":"192.168.123.106:6833/1155449015","heartbeat_front_addr":"192.168.123.106:6831/1155449015","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:52.338458+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:52.946112+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:53.702956+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:54.530210+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:55.672155+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:56.522391+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:57.133917+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:57.830615+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.106:0/1794388721":"2026-04-16T13:34:08.155627+0000","192.168.123.106:6800/3431857196":"2026-04-16T13:33:08.499683+0000","192.168.123.106:6801/235423043":"2026-04-16T13:33:29.098096+0000","192.168.123.106:6801/34318571
96":"2026-04-16T13:33:08.499683+0000","192.168.123.106:0/517616836":"2026-04-16T13:34:08.155627+0000","192.168.123.106:0/1351629426":"2026-04-16T13:33:08.499683+0000","192.168.123.106:0/4178947106":"2026-04-16T13:33:08.499683+0000","192.168.123.106:0/881279385":"2026-04-16T13:33:08.499683+0000","192.168.123.106:0/3417962058":"2026-04-16T13:34:08.155627+0000","192.168.123.106:0/901355294":"2026-04-16T13:33:29.098096+0000","192.168.123.106:6800/235423043":"2026-04-16T13:33:29.098096+0000","192.168.123.106:0/3221750867":"2026-04-16T13:33:29.098096+0000","192.168.123.106:0/2993942436":"2026-04-16T13:33:29.098096+0000","192.168.123.106:6800/1064036141":"2026-04-16T13:34:08.155627+0000","192.168.123.106:6801/1064036141":"2026-04-16T13:34:08.155627+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"isa","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-04-15T13:35:00.158 INFO:tasks.cephadm.ceph_manager.ceph:[{'pool': 1, 'pool_name': '.mgr', 'create_time': '2026-04-15T13:34:56.455012+0000', 'flags': 32769, 'flags_names': 'hashpspool,creating', 'type': 1, 'size': 3, 'min_size': 2, 'crush_rule': 0, 'peering_crush_bucket_count': 0, 'peering_crush_bucket_target': 0, 'peering_crush_bucket_barrier': 0, 'peering_crush_bucket_mandatory_member': 2147483647, 'is_stretch_pool': False, 'object_hash': 2, 'pg_autoscale_mode': 'off', 'pg_num': 1, 'pg_placement_num': 1, 'pg_placement_num_target': 1, 'pg_num_target': 1, 'pg_num_pending': 1, 'last_pg_merge_meta': {'source_pgid': '0.0', 'ready_epoch': 0, 'last_epoch_started': 0, 'last_epoch_clean': 0, 'source_version': "0'0", 'target_version': "0'0"}, 'last_change': '20', 'last_force_op_resend': '0', 'last_force_op_resend_prenautilus': '0', 'last_force_op_resend_preluminous': '0', 'auid': 0, 'snap_mode': 'selfmanaged', 'snap_seq': 0, 'snap_epoch': 0, 'pool_snaps': [], 'removed_snaps': '[]', 'quota_max_bytes': 0, 'quota_max_objects': 0, 'tiers': [], 'tier_of': -1, 'read_tier': -1, 'write_tier': -1, 'cache_mode': 'none', 'target_max_bytes': 0, 'target_max_objects': 0, 'cache_target_dirty_ratio_micro': 400000, 'cache_target_dirty_high_ratio_micro': 600000, 'cache_target_full_ratio_micro': 800000, 'cache_min_flush_age': 0, 'cache_min_evict_age': 0, 'erasure_code_profile': '', 'hit_set_params': {'type': 'none'}, 'hit_set_period': 0, 'hit_set_count': 0, 'use_gmt_hitset': True, 'min_read_recency_for_promote': 0, 'min_write_recency_for_promote': 0, 'hit_set_grade_decay_rate': 0, 'hit_set_search_last_n': 0, 'grade_table': [], 'stripe_width': 0, 'expected_num_objects': 0, 'fast_read': False, 'nonprimary_shards': '{}', 'options': {'pg_num_max': 32, 'pg_num_min': 1}, 'application_metadata': {'mgr': {}}, 'read_balance': {'score_type': 'Fair distribution', 'score_acting': 7.889999866485596, 'score_stable': 7.889999866485596, 'optimal_score': 0.3799999952316284, 'raw_score_acting': 3, 'raw_score_stable': 3, 'primary_affinity_weighted': 1, 'average_primary_affinity': 1, 'average_primary_affinity_weighted': 1}}] 2026-04-15T13:35:00.158 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd pool get .mgr 
pg_num 2026-04-15T13:35:00.403 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:00.740 INFO:teuthology.orchestra.run.vm06.stdout:pg_num: 1 2026-04-15T13:35:00.751 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:00 vm06 bash[28114]: cluster 2026-04-15T13:34:57.830603+0000 osd.7 (osd.7) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-15T13:35:00.751 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:00 vm06 bash[28114]: cluster 2026-04-15T13:34:57.830613+0000 osd.7 (osd.7) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-15T13:35:00.751 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:00 vm06 bash[28114]: cluster 2026-04-15T13:35:00.034996+0000 mon.vm06 (mon.0) 612 : cluster [DBG] osdmap e22: 8 total, 8 up, 8 in 2026-04-15T13:35:00.751 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:00 vm06 bash[28114]: audit 2026-04-15T13:35:00.073148+0000 mon.vm06 (mon.0) 613 : audit [DBG] from='client.? 192.168.123.106:0/4133043719' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-15T13:35:00.751 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:00 vm06 bash[28114]: cluster 2026-04-15T13:35:00.169645+0000 mgr.vm06.qbbldl (mgr.14229) 85 : cluster [DBG] pgmap v36: 1 pgs: 1 creating+remapped; 0 B data, 612 MiB used, 159 GiB / 160 GiB avail 2026-04-15T13:35:00.797 INFO:tasks.cephadm:Setting up client nodes...
2026-04-15T13:35:00.797 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph auth get-or-create client.0 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-04-15T13:35:00.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:00 vm09 bash[34466]: cluster 2026-04-15T13:34:57.830603+0000 osd.7 (osd.7) 1 : cluster [DBG] purged_snaps scrub starts 2026-04-15T13:35:00.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:00 vm09 bash[34466]: cluster 2026-04-15T13:34:57.830613+0000 osd.7 (osd.7) 2 : cluster [DBG] purged_snaps scrub ok 2026-04-15T13:35:00.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:00 vm09 bash[34466]: cluster 2026-04-15T13:35:00.034996+0000 mon.vm06 (mon.0) 612 : cluster [DBG] osdmap e22: 8 total, 8 up, 8 in 2026-04-15T13:35:00.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:00 vm09 bash[34466]: audit 2026-04-15T13:35:00.073148+0000 mon.vm06 (mon.0) 613 : audit [DBG] from='client.? 
192.168.123.106:0/4133043719' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-15T13:35:00.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:00 vm09 bash[34466]: cluster 2026-04-15T13:35:00.169645+0000 mgr.vm06.qbbldl (mgr.14229) 85 : cluster [DBG] pgmap v36: 1 pgs: 1 creating+remapped; 0 B data, 612 MiB used, 159 GiB / 160 GiB avail 2026-04-15T13:35:01.073 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:01.593 INFO:teuthology.orchestra.run.vm06.stdout:[client.0] 2026-04-15T13:35:01.593 INFO:teuthology.orchestra.run.vm06.stdout: key = AQAFlN9pCixEIxAACMNLT5W8p9Xm1o0l/lVyfA== 2026-04-15T13:35:01.720 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-04-15T13:35:01.720 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/etc/ceph/ceph.client.0.keyring 2026-04-15T13:35:01.720 DEBUG:teuthology.orchestra.run.vm06:> sudo chmod 0644 /etc/ceph/ceph.client.0.keyring 2026-04-15T13:35:01.737 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph auth get-or-create client.1 mon 'allow *' osd 'allow *' mds 'allow *' mgr 'allow *' 2026-04-15T13:35:01.744 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:01 vm09 bash[34466]: audit 2026-04-15T13:35:00.741485+0000 mon.vm06 (mon.0) 614 : audit [DBG] from='client.? 
192.168.123.106:0/726671891' entity='client.admin' cmd={"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"} : dispatch 2026-04-15T13:35:01.744 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:01 vm09 bash[34466]: cluster 2026-04-15T13:35:01.044557+0000 mon.vm06 (mon.0) 615 : cluster [DBG] osdmap e23: 8 total, 8 up, 8 in 2026-04-15T13:35:01.744 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:01 vm09 bash[34466]: audit 2026-04-15T13:35:01.127992+0000 mon.vm06 (mon.0) 616 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:01.744 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:01 vm09 bash[34466]: audit 2026-04-15T13:35:01.134609+0000 mon.vm06 (mon.0) 617 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:01.744 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:01 vm09 bash[34466]: audit 2026-04-15T13:35:01.276236+0000 mon.vm06 (mon.0) 618 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:01.744 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:01 vm09 bash[34466]: audit 2026-04-15T13:35:01.363438+0000 mon.vm06 (mon.0) 619 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:01.744 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:01 vm09 bash[34466]: audit 2026-04-15T13:35:01.426706+0000 mon.vm06 (mon.0) 620 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:01.744 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:01 vm09 bash[34466]: audit 2026-04-15T13:35:01.492203+0000 mon.vm06 (mon.0) 621 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:01.744 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:01 
vm09 bash[34466]: audit 2026-04-15T13:35:01.545354+0000 mon.vm06 (mon.0) 622 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:02.002 INFO:teuthology.orchestra.run.vm09.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm09/config 2026-04-15T13:35:02.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:01 vm06 bash[28114]: audit 2026-04-15T13:35:00.741485+0000 mon.vm06 (mon.0) 614 : audit [DBG] from='client.? 192.168.123.106:0/726671891' entity='client.admin' cmd={"prefix": "osd pool get", "pool": ".mgr", "var": "pg_num"} : dispatch 2026-04-15T13:35:02.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:01 vm06 bash[28114]: cluster 2026-04-15T13:35:01.044557+0000 mon.vm06 (mon.0) 615 : cluster [DBG] osdmap e23: 8 total, 8 up, 8 in 2026-04-15T13:35:02.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:01 vm06 bash[28114]: audit 2026-04-15T13:35:01.127992+0000 mon.vm06 (mon.0) 616 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:02.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:01 vm06 bash[28114]: audit 2026-04-15T13:35:01.134609+0000 mon.vm06 (mon.0) 617 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:02.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:01 vm06 bash[28114]: audit 2026-04-15T13:35:01.276236+0000 mon.vm06 (mon.0) 618 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:02.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:01 vm06 bash[28114]: audit 2026-04-15T13:35:01.363438+0000 mon.vm06 (mon.0) 619 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:02.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:01 vm06 bash[28114]: audit 2026-04-15T13:35:01.426706+0000 mon.vm06 (mon.0) 620 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:02.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:01 vm06 bash[28114]: audit 2026-04-15T13:35:01.492203+0000 mon.vm06 (mon.0) 621 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:02.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:01 vm06 bash[28114]: audit 2026-04-15T13:35:01.545354+0000 mon.vm06 (mon.0) 622 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:02.449 INFO:teuthology.orchestra.run.vm09.stdout:[client.1] 2026-04-15T13:35:02.449 INFO:teuthology.orchestra.run.vm09.stdout: key = AQAGlN9p4quaGhAAYqqKdLG8j/OhKpdpWVjUjA== 2026-04-15T13:35:02.543 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-04-15T13:35:02.544 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/etc/ceph/ceph.client.1.keyring 2026-04-15T13:35:02.544 DEBUG:teuthology.orchestra.run.vm09:> sudo chmod 0644 /etc/ceph/ceph.client.1.keyring 2026-04-15T13:35:02.602 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph config log 1 --format=json 2026-04-15T13:35:02.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:02 vm09 bash[34466]: audit 2026-04-15T13:35:01.591553+0000 mon.vm06 (mon.0) 623 : audit [INF] from='client.? 192.168.123.106:0/3851352144' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch 2026-04-15T13:35:02.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:02 vm09 bash[34466]: audit 2026-04-15T13:35:01.594125+0000 mon.vm06 (mon.0) 624 : audit [INF] from='client.? 
192.168.123.106:0/3851352144' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-04-15T13:35:02.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:02 vm09 bash[34466]: audit 2026-04-15T13:35:01.605989+0000 mon.vm06 (mon.0) 625 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:02.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:02 vm09 bash[34466]: audit 2026-04-15T13:35:01.683913+0000 mon.vm06 (mon.0) 626 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:02.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:02 vm09 bash[34466]: audit 2026-04-15T13:35:01.776945+0000 mon.vm06 (mon.0) 627 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:02.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:02 vm09 bash[34466]: audit 2026-04-15T13:35:01.778390+0000 mon.vm06 (mon.0) 628 : audit [INF] from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-04-15T13:35:02.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:02 vm09 bash[34466]: audit 2026-04-15T13:35:01.805605+0000 mon.vm06 (mon.0) 629 : audit [INF] from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-04-15T13:35:02.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:02 vm09 bash[34466]: audit 2026-04-15T13:35:01.805925+0000 mon.vm06 (mon.0) 630 : audit [DBG] from='mgr.14229 
192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm06"} : dispatch 2026-04-15T13:35:02.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:02 vm09 bash[34466]: audit 2026-04-15T13:35:01.806060+0000 mon.vm06 (mon.0) 631 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch 2026-04-15T13:35:02.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:02 vm09 bash[34466]: audit 2026-04-15T13:35:01.806073+0000 mon.vm09 (mon.1) 24 : audit [INF] from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-04-15T13:35:02.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:02 vm09 bash[34466]: audit 2026-04-15T13:35:01.808149+0000 mon.vm06 (mon.0) 632 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm06"} : dispatch 2026-04-15T13:35:02.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:02 vm09 bash[34466]: audit 2026-04-15T13:35:01.808254+0000 mon.vm06 (mon.0) 633 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch 2026-04-15T13:35:02.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:02 vm09 bash[34466]: audit 2026-04-15T13:35:01.826576+0000 mon.vm09 (mon.1) 25 : audit [INF] from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-04-15T13:35:02.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:02 vm09 bash[34466]: audit 2026-04-15T13:35:01.836180+0000 mon.vm06 (mon.0) 634 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:02.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:02 vm09 bash[34466]: audit 2026-04-15T13:35:01.842394+0000 mon.vm06 (mon.0) 635 : audit 
[INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:02.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:02 vm09 bash[34466]: cluster 2026-04-15T13:35:02.048295+0000 mon.vm06 (mon.0) 636 : cluster [DBG] osdmap e24: 8 total, 8 up, 8 in 2026-04-15T13:35:02.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:02 vm09 bash[34466]: cluster 2026-04-15T13:35:02.169852+0000 mgr.vm06.qbbldl (mgr.14229) 86 : cluster [DBG] pgmap v39: 1 pgs: 1 creating+remapped; 0 B data, 613 MiB used, 159 GiB / 160 GiB avail 2026-04-15T13:35:02.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:02 vm09 bash[34466]: audit 2026-04-15T13:35:02.446167+0000 mon.vm06 (mon.0) 637 : audit [INF] from='client.? 192.168.123.109:0/460740565' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch 2026-04-15T13:35:02.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:02 vm09 bash[34466]: audit 2026-04-15T13:35:02.450494+0000 mon.vm06 (mon.0) 638 : audit [INF] from='client.? 192.168.123.109:0/460740565' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-04-15T13:35:02.878 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:02.923 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:02 vm06 bash[28114]: audit 2026-04-15T13:35:01.591553+0000 mon.vm06 (mon.0) 623 : audit [INF] from='client.? 
192.168.123.106:0/3851352144' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch 2026-04-15T13:35:02.923 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:02 vm06 bash[28114]: audit 2026-04-15T13:35:01.594125+0000 mon.vm06 (mon.0) 624 : audit [INF] from='client.? 192.168.123.106:0/3851352144' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.0", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-04-15T13:35:02.923 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:02 vm06 bash[28114]: audit 2026-04-15T13:35:01.605989+0000 mon.vm06 (mon.0) 625 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:02.923 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:02 vm06 bash[28114]: audit 2026-04-15T13:35:01.683913+0000 mon.vm06 (mon.0) 626 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:02.923 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:02 vm06 bash[28114]: audit 2026-04-15T13:35:01.776945+0000 mon.vm06 (mon.0) 627 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:02.923 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:02 vm06 bash[28114]: audit 2026-04-15T13:35:01.778390+0000 mon.vm06 (mon.0) 628 : audit [INF] from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-04-15T13:35:02.923 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:02 vm06 bash[28114]: audit 2026-04-15T13:35:01.805605+0000 mon.vm06 (mon.0) 629 : audit [INF] from='admin socket' entity='admin 
socket' cmd=smart args=[json]: finished 2026-04-15T13:35:02.923 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:02 vm06 bash[28114]: audit 2026-04-15T13:35:01.805925+0000 mon.vm06 (mon.0) 630 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm06"} : dispatch 2026-04-15T13:35:02.923 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:02 vm06 bash[28114]: audit 2026-04-15T13:35:01.806060+0000 mon.vm06 (mon.0) 631 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch 2026-04-15T13:35:02.923 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:02 vm06 bash[28114]: audit 2026-04-15T13:35:01.806073+0000 mon.vm09 (mon.1) 24 : audit [INF] from='admin socket' entity='admin socket' cmd='smart' args=[json]: dispatch 2026-04-15T13:35:02.923 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:02 vm06 bash[28114]: audit 2026-04-15T13:35:01.808149+0000 mon.vm06 (mon.0) 632 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm06"} : dispatch 2026-04-15T13:35:02.923 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:02 vm06 bash[28114]: audit 2026-04-15T13:35:01.808254+0000 mon.vm06 (mon.0) 633 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "mon metadata", "id": "vm09"} : dispatch 2026-04-15T13:35:02.923 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:02 vm06 bash[28114]: audit 
2026-04-15T13:35:01.826576+0000 mon.vm09 (mon.1) 25 : audit [INF] from='admin socket' entity='admin socket' cmd=smart args=[json]: finished 2026-04-15T13:35:02.923 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:02 vm06 bash[28114]: audit 2026-04-15T13:35:01.836180+0000 mon.vm06 (mon.0) 634 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:02.923 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:02 vm06 bash[28114]: audit 2026-04-15T13:35:01.842394+0000 mon.vm06 (mon.0) 635 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:02.923 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:02 vm06 bash[28114]: cluster 2026-04-15T13:35:02.048295+0000 mon.vm06 (mon.0) 636 : cluster [DBG] osdmap e24: 8 total, 8 up, 8 in 2026-04-15T13:35:02.923 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:02 vm06 bash[28114]: cluster 2026-04-15T13:35:02.169852+0000 mgr.vm06.qbbldl (mgr.14229) 86 : cluster [DBG] pgmap v39: 1 pgs: 1 creating+remapped; 0 B data, 613 MiB used, 159 GiB / 160 GiB avail 2026-04-15T13:35:02.924 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:02 vm06 bash[28114]: audit 2026-04-15T13:35:02.446167+0000 mon.vm06 (mon.0) 637 : audit [INF] from='client.? 192.168.123.109:0/460740565' entity='client.admin' cmd={"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]} : dispatch 2026-04-15T13:35:02.924 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:02 vm06 bash[28114]: audit 2026-04-15T13:35:02.450494+0000 mon.vm06 (mon.0) 638 : audit [INF] from='client.? 
192.168.123.109:0/460740565' entity='client.admin' cmd='[{"prefix": "auth get-or-create", "entity": "client.1", "caps": ["mon", "allow *", "osd", "allow *", "mds", "allow *", "mgr", "allow *"]}]': finished 2026-04-15T13:35:03.258 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:35:03.358 INFO:teuthology.orchestra.run.vm06.stdout:[{"version":18,"timestamp":"2026-04-15T13:34:58.584544+0000","name":"","changes":[{"name":"osd.7/osd_mclock_max_capacity_iops_ssd","new_value":"40829.757577"}]}] 2026-04-15T13:35:03.359 INFO:tasks.ceph_manager:config epoch is 18 2026-04-15T13:35:03.359 INFO:tasks.ceph:Waiting until ceph daemons up and pgs clean... 2026-04-15T13:35:03.359 INFO:tasks.cephadm.ceph_manager.ceph:waiting for mgr available 2026-04-15T13:35:03.359 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mgr dump --format=json 2026-04-15T13:35:03.621 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:04.033 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:35:04.100 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":19,"flags":0,"active_gid":14229,"active_name":"vm06.qbbldl","active_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6800","nonce":816869170},{"type":"v1","addr":"192.168.123.106:6801","nonce":816869170}]},"active_addr":"192.168.123.106:6801/816869170","active_change":"2026-04-15T13:34:08.155914+0000","active_mgr_features":4541880224203014143,"available":true,"standbys":[{"gid":14258,"name":"vm09.kpawde","mgr_features":4541880224203014143,"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to, use commas to separate multiple","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP 
server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 
= Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send 
metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"certificate_automated_rotation_enabled":{"name":"certificate_automated_rotation_enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"This flag controls whether cephadm automatically rotates certificates upon expiration.","long_desc":"","tags":[],"see_also":[]},"certificate_check_debug_mode":{"name":"certificate_check_debug_mode","type":"bool","level":"dev","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"FOR TESTING ONLY: This flag forces the certificate check instead of waiting for certificate_check_period.","long_desc":"","tags":[],"see_also":[]},"certificate_check_period":{"name":"certificate_check_period","type":"int","level":"advanced","flags":0,"default_value":"1","min":"0","max":"30","enum_allowed":[],"desc":"Specifies how often (in days) the certificate should be checked for validity.","long_desc":"","tags":[],"see_also":[]},"certificate_duration_days":{"name":"certificate_duration_days","type":"int","level":"advanced","flags":0,"default_value":"1095","min":"90","max":"3650","enum_allowed":[],"desc":"Specifies the duration of self certificates generated and signed by cephadm root CA","long_desc":"","tags":[],"see_also":[]},"certificate_renewal_threshold_days":{"name":"certificate_renewal_threshold_days","type":"int","level":"advanced","flags":0,"default_value":"30","min":"10","max":"90","enum_allowed":[],"desc":"Specifies the lead time in days to initiate certificate renewal before expiration.","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman 
only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.28.1","min":"","max":"","enum_allowed":[],"desc":"Alertmanager container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"Elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:12.2.0","min":"","max":"","enum_allowed":[],"desc":"Grafana container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"Haproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"docker.io/grafana/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_nginx":{"name":"container_image_nginx","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nginx:sclorg-nginx-126","min":"","max":"","enum_allowed":[],"desc":"Nginx container 
image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.9.1","min":"","max":"","enum_allowed":[],"desc":"Node exporter container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.5","min":"","max":"","enum_allowed":[],"desc":"Nvmeof container image","long_desc":"","tags":[],"see_also":[]},"container_image_oauth2_proxy":{"name":"container_image_oauth2_proxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/oauth2-proxy/oauth2-proxy:v7.6.0","min":"","max":"","enum_allowed":[],"desc":"Oauth2 proxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v3.6.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"docker.io/grafana/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:ceph20-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba_metrics":{"name":"container_image_samba_metrics","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-metrics:ceph20-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba metrics container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"Snmp gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. 
This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel\"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"stray_daemon_check_interval":{"name":"stray_daemon_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"how frequently cephadm should check for the presence of stray daemons","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"MANAGED_BY_CLUSTERS":{"name":"MANAGED_BY_CLUSTERS","type":"str","level":"advanced","flags":0,"default_value":"[]","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"MULTICLUSTER_CONFIG":{"name":"MULTICLUSTER_CONFIG","type":"str","level":"advanced","flags":0,"default_value":"{}","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"Fal
se","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_HOSTNAME_PER_DAEMON":{"name":"RGW_HOSTNAME_PER_DAEMON","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"UNSAFE_TLS_v1_2":{"name":"UNSAFE_TLS_v1_2","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"
desc":"Enable/disable debug options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sso_oauth2":{"name":"sso_oauth2","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":
"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. 
You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_als
o":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"prometheus_tls_secret_name":{"name":"prometheus_tls_secret_name","type":"str","level":"advanced","flags":0,"default_value":"rook-ceph-prometheus-server-tls","min":"","max":"","enum_allowed":[],"desc":"name of tls secret in k8s for prometheus","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered 
PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"smb","can_run":true,"error_string":"","module_options":{"internal_store_backend":{"name":"internal_store_backend","type":"str","level":"dev","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"set 
internal store backend. for development and testing only","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_orchestration":{"name":"update_orchestration","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically update orchestration when smb resources are changed","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"pause_cloning":{"name":"pause_cloning","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Pause asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"pause_purging":{"name":"pause_purging","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Pause asynchronous subvolume purge threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}]}],"modules":["cephadm","dashboard","iostat","nfs","prometheus"],"available_modules":[{"name":"alerts","can_run":true,"error_string":"","module_options":{"interval":{"name":"interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"How frequently to reexamine health status","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"smtp_destination":{"name":"smtp_destination","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Email address to send alerts to, use commas to separate multiple","long_desc":"","tags":[],"see_also":[]},"smtp_from_name":{"name":"smtp_from_name","type":"str","level":"advanced","flags":1,"default_value":"Ceph","min":"","max":"","enum_allowed":[],"desc":"Email From: name","long_desc":"","tags":[],"see_also":[]},"smtp_host":{"name":"smtp_host","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_password":{"name":"smtp_password","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Password to authenticate with","long_desc":"","tags":[],"see_also":[]},"smtp_port":{"name":"smtp_port","type":"int","level":"advanced","flags":1,"default_value":"465","min":"","max":"","enum_allowed":[],"desc":"SMTP port","long_desc":"","tags":[],"see_also":[]},"smtp_sender":{"name":"smtp_sender","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"SMTP envelope sender","long_desc":"","tags":[],"see_also":[]},"smtp_ssl":{"name":"smtp_ssl","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Use SSL to connect to SMTP server","long_desc":"","tags":[],"see_also":[]},"smtp_user":{"name":"smtp_user","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"User to authenticate 
as","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"balancer","can_run":true,"error_string":"","module_options":{"active":{"name":"active","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically balance PGs across cluster","long_desc":"","tags":[],"see_also":[]},"begin_time":{"name":"begin_time","type":"str","level":"advanced","flags":1,"default_value":"0000","min":"","max":"","enum_allowed":[],"desc":"beginning time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"begin_weekday":{"name":"begin_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to this day of the week or later","long_desc":"0 = Sunday, 1 = Monday, etc.","tags":[],"see_also":[]},"crush_compat_max_iterations":{"name":"crush_compat_max_iterations","type":"uint","level":"advanced","flags":1,"default_value":"25","min":"1","max":"250","enum_allowed":[],"desc":"maximum number of iterations to attempt optimization","long_desc":"","tags":[],"see_also":[]},"crush_compat_metrics":{"name":"crush_compat_metrics","type":"str","level":"advanced","flags":1,"default_value":"pgs,objects,bytes","min":"","max":"","enum_allowed":[],"desc":"metrics with which to calculate OSD utilization","long_desc":"Value is a list of one or more of \"pgs\", \"objects\", or \"bytes\", and indicates which metrics to use to balance utilization.","tags":[],"see_also":[]},"crush_compat_step":{"name":"crush_compat_step","type":"float","level":"advanced","flags":1,"default_value":"0.5","min":"0.001","max":"0.999","enum_allowed":[],"desc":"aggressiveness of optimization","long_desc":".99 is very aggressive, .01 is less aggressive","tags":[],"see_also":[]},"end_time":{"name":"end_time","type":"str","level":"advanced","flags":1,"default_value":"2359","min":"","max":"","enum_allowed":[],"desc":"ending time of day to automatically balance","long_desc":"This is a time of day in the format HHMM.","tags":[],"see_also":[]},"end_weekday":{"name":"end_weekday","type":"uint","level":"advanced","flags":1,"default_value":"0","min":"0","max":"6","enum_allowed":[],"desc":"Restrict automatic balancing to days of the week earlier than this","long_desc":"0 = Sunday, 1 = Monday, 
etc.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_score":{"name":"min_score","type":"float","level":"advanced","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"minimum score, below which no optimization is attempted","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":1,"default_value":"upmap","min":"","max":"","enum_allowed":["crush-compat","none","read","upmap","upmap-read"],"desc":"Balancer mode","long_desc":"","tags":[],"see_also":[]},"pool_ids":{"name":"pool_ids","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"pools which the automatic balancing will be limited to","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and attempt optimization","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_pg_upmap_activity":{"name":"update_pg_upmap_activity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Updates pg_upmap activity stats to be used in `balancer status detail`","long_desc":"","tags":[],"see_also":[]},"upmap_max_deviation":{"name":"upmap_max_deviation","type":"int","level":"advanced","flags":1,"default_value":"5","min":"1","max":"","enum_allowed":[],"desc":"deviation below which no optimization is attempted","long_desc":"If the number of PGs are within this count then no optimization is attempted","tags":[],"see_also":[]},"upmap_max_optimizations":{"name":"upmap_max_optimizations","type":"uint","level":"advanced","flags":1,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"maximum upmap optimizations to make per attempt","long_desc":"","tags":[],"see_also":[]}}},{"name":"cephadm","can_run":true,"error_string":"","module_options":{"agent_down_multiplier":{"name":"agent_down_multiplier","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"","max":"","enum_allowed":[],"desc":"Multiplied by agent refresh rate to calculate how long agent must not report before being marked down","long_desc":"","tags":[],"see_also":[]},"agent_refresh_rate":{"name":"agent_refresh_rate","type":"secs","level":"advanced","flags":0,"default_value":"20","min":"","max":"","enum_allowed":[],"desc":"How often agent on each host will try to gather and send 
metadata","long_desc":"","tags":[],"see_also":[]},"agent_starting_port":{"name":"agent_starting_port","type":"int","level":"advanced","flags":0,"default_value":"4721","min":"","max":"","enum_allowed":[],"desc":"First port agent will try to bind to (will also try up to next 1000 subsequent ports if blocked)","long_desc":"","tags":[],"see_also":[]},"allow_ptrace":{"name":"allow_ptrace","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow SYS_PTRACE capability on ceph containers","long_desc":"The SYS_PTRACE capability is needed to attach to a process with gdb or strace. Enabling this options can allow debugging daemons that encounter problems at runtime.","tags":[],"see_also":[]},"autotune_interval":{"name":"autotune_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to autotune daemon memory","long_desc":"","tags":[],"see_also":[]},"autotune_memory_target_ratio":{"name":"autotune_memory_target_ratio","type":"float","level":"advanced","flags":0,"default_value":"0.7","min":"","max":"","enum_allowed":[],"desc":"ratio of total system memory to divide amongst autotuned daemons","long_desc":"","tags":[],"see_also":[]},"cephadm_log_destination":{"name":"cephadm_log_destination","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":["file","file,syslog","syslog"],"desc":"Destination for cephadm command's persistent logging","long_desc":"","tags":[],"see_also":[]},"certificate_automated_rotation_enabled":{"name":"certificate_automated_rotation_enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"This flag controls whether cephadm automatically rotates certificates upon expiration.","long_desc":"","tags":[],"see_also":[]},"certificate_check_debug_mode":{"name":"certificate_check_debug_mode","type":"bool","level":"dev","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"FOR TESTING ONLY: This flag forces the certificate check instead of waiting for certificate_check_period.","long_desc":"","tags":[],"see_also":[]},"certificate_check_period":{"name":"certificate_check_period","type":"int","level":"advanced","flags":0,"default_value":"1","min":"0","max":"30","enum_allowed":[],"desc":"Specifies how often (in days) the certificate should be checked for validity.","long_desc":"","tags":[],"see_also":[]},"certificate_duration_days":{"name":"certificate_duration_days","type":"int","level":"advanced","flags":0,"default_value":"1095","min":"90","max":"3650","enum_allowed":[],"desc":"Specifies the duration of self certificates generated and signed by cephadm root CA","long_desc":"","tags":[],"see_also":[]},"certificate_renewal_threshold_days":{"name":"certificate_renewal_threshold_days","type":"int","level":"advanced","flags":0,"default_value":"30","min":"10","max":"90","enum_allowed":[],"desc":"Specifies the lead time in days to initiate certificate renewal before expiration.","long_desc":"","tags":[],"see_also":[]},"cgroups_split":{"name":"cgroups_split","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Pass --cgroups=split when cephadm creates containers (currently podman 
only)","long_desc":"","tags":[],"see_also":[]},"config_checks_enabled":{"name":"config_checks_enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable or disable the cephadm configuration analysis","long_desc":"","tags":[],"see_also":[]},"config_dashboard":{"name":"config_dashboard","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"manage configs like API endpoints in Dashboard.","long_desc":"","tags":[],"see_also":[]},"container_image_alertmanager":{"name":"container_image_alertmanager","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/alertmanager:v0.28.1","min":"","max":"","enum_allowed":[],"desc":"Alertmanager container image","long_desc":"","tags":[],"see_also":[]},"container_image_base":{"name":"container_image_base","type":"str","level":"advanced","flags":1,"default_value":"quay.io/ceph/ceph","min":"","max":"","enum_allowed":[],"desc":"Container image name, without the tag","long_desc":"","tags":[],"see_also":[]},"container_image_elasticsearch":{"name":"container_image_elasticsearch","type":"str","level":"advanced","flags":0,"default_value":"quay.io/omrizeneva/elasticsearch:6.8.23","min":"","max":"","enum_allowed":[],"desc":"Elasticsearch container image","long_desc":"","tags":[],"see_also":[]},"container_image_grafana":{"name":"container_image_grafana","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/grafana:12.2.0","min":"","max":"","enum_allowed":[],"desc":"Grafana container image","long_desc":"","tags":[],"see_also":[]},"container_image_haproxy":{"name":"container_image_haproxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/haproxy:2.3","min":"","max":"","enum_allowed":[],"desc":"Haproxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_agent":{"name":"container_image_jaeger_agent","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-agent:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger agent container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_collector":{"name":"container_image_jaeger_collector","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-collector:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger collector container image","long_desc":"","tags":[],"see_also":[]},"container_image_jaeger_query":{"name":"container_image_jaeger_query","type":"str","level":"advanced","flags":0,"default_value":"quay.io/jaegertracing/jaeger-query:1.29","min":"","max":"","enum_allowed":[],"desc":"Jaeger query container image","long_desc":"","tags":[],"see_also":[]},"container_image_keepalived":{"name":"container_image_keepalived","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/keepalived:2.2.4","min":"","max":"","enum_allowed":[],"desc":"Keepalived container image","long_desc":"","tags":[],"see_also":[]},"container_image_loki":{"name":"container_image_loki","type":"str","level":"advanced","flags":0,"default_value":"docker.io/grafana/loki:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Loki container image","long_desc":"","tags":[],"see_also":[]},"container_image_nginx":{"name":"container_image_nginx","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nginx:sclorg-nginx-126","min":"","max":"","enum_allowed":[],"desc":"Nginx container 
image","long_desc":"","tags":[],"see_also":[]},"container_image_node_exporter":{"name":"container_image_node_exporter","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/node-exporter:v1.9.1","min":"","max":"","enum_allowed":[],"desc":"Node exporter container image","long_desc":"","tags":[],"see_also":[]},"container_image_nvmeof":{"name":"container_image_nvmeof","type":"str","level":"advanced","flags":0,"default_value":"quay.io/ceph/nvmeof:1.5","min":"","max":"","enum_allowed":[],"desc":"Nvmeof container image","long_desc":"","tags":[],"see_also":[]},"container_image_oauth2_proxy":{"name":"container_image_oauth2_proxy","type":"str","level":"advanced","flags":0,"default_value":"quay.io/oauth2-proxy/oauth2-proxy:v7.6.0","min":"","max":"","enum_allowed":[],"desc":"Oauth2 proxy container image","long_desc":"","tags":[],"see_also":[]},"container_image_prometheus":{"name":"container_image_prometheus","type":"str","level":"advanced","flags":0,"default_value":"quay.io/prometheus/prometheus:v3.6.0","min":"","max":"","enum_allowed":[],"desc":"Prometheus container image","long_desc":"","tags":[],"see_also":[]},"container_image_promtail":{"name":"container_image_promtail","type":"str","level":"advanced","flags":0,"default_value":"docker.io/grafana/promtail:3.0.0","min":"","max":"","enum_allowed":[],"desc":"Promtail container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba":{"name":"container_image_samba","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-server:ceph20-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba container image","long_desc":"","tags":[],"see_also":[]},"container_image_samba_metrics":{"name":"container_image_samba_metrics","type":"str","level":"advanced","flags":0,"default_value":"quay.io/samba.org/samba-metrics:ceph20-centos-amd64","min":"","max":"","enum_allowed":[],"desc":"Samba metrics container image","long_desc":"","tags":[],"see_also":[]},"container_image_snmp_gateway":{"name":"container_image_snmp_gateway","type":"str","level":"advanced","flags":0,"default_value":"docker.io/maxwo/snmp-notifier:v1.2.1","min":"","max":"","enum_allowed":[],"desc":"Snmp gateway container image","long_desc":"","tags":[],"see_also":[]},"container_init":{"name":"container_init","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Run podman/docker with `--init`","long_desc":"","tags":[],"see_also":[]},"daemon_cache_timeout":{"name":"daemon_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"seconds to cache service (daemon) inventory","long_desc":"","tags":[],"see_also":[]},"default_cephadm_command_timeout":{"name":"default_cephadm_command_timeout","type":"int","level":"advanced","flags":0,"default_value":"900","min":"","max":"","enum_allowed":[],"desc":"Default timeout applied to cephadm commands run directly on the host (in seconds)","long_desc":"","tags":[],"see_also":[]},"default_registry":{"name":"default_registry","type":"str","level":"advanced","flags":0,"default_value":"quay.io","min":"","max":"","enum_allowed":[],"desc":"Search-registry to which we should normalize unqualified image names. 
This is not the default registry","long_desc":"","tags":[],"see_also":[]},"device_cache_timeout":{"name":"device_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"seconds to cache device inventory","long_desc":"","tags":[],"see_also":[]},"device_enhanced_scan":{"name":"device_enhanced_scan","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use libstoragemgmt during device scans","long_desc":"","tags":[],"see_also":[]},"facts_cache_timeout":{"name":"facts_cache_timeout","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"seconds to cache host facts data","long_desc":"","tags":[],"see_also":[]},"grafana_dashboards_path":{"name":"grafana_dashboards_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/grafana/dashboards/ceph-dashboard/","min":"","max":"","enum_allowed":[],"desc":"location of dashboards to include in grafana deployments","long_desc":"","tags":[],"see_also":[]},"host_check_interval":{"name":"host_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to perform a host check","long_desc":"","tags":[],"see_also":[]},"hw_monitoring":{"name":"hw_monitoring","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Deploy hw monitoring daemon on every host.","long_desc":"","tags":[],"see_also":[]},"inventory_list_all":{"name":"inventory_list_all","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Whether ceph-volume inventory should report more devices (mostly mappers (LVs / mpaths), partitions...)","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_refresh_metadata":{"name":"log_refresh_metadata","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Log all refresh metadata. Includes daemon, device, and host info collected regularly. 
Only has effect if logging at debug level","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"log to the \"cephadm\" cluster log channel","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf":{"name":"manage_etc_ceph_ceph_conf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Manage and own /etc/ceph/ceph.conf on the hosts.","long_desc":"","tags":[],"see_also":[]},"manage_etc_ceph_ceph_conf_hosts":{"name":"manage_etc_ceph_ceph_conf_hosts","type":"str","level":"advanced","flags":0,"default_value":"*","min":"","max":"","enum_allowed":[],"desc":"PlacementSpec describing on which hosts to manage /etc/ceph/ceph.conf","long_desc":"","tags":[],"see_also":[]},"max_count_per_host":{"name":"max_count_per_host","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of daemons per service per host","long_desc":"","tags":[],"see_also":[]},"max_osd_draining_count":{"name":"max_osd_draining_count","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"max number of osds that will be drained simultaneously when osds are removed","long_desc":"","tags":[],"see_also":[]},"migration_current":{"name":"migration_current","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"internal - do not modify","long_desc":"","tags":[],"see_also":[]},"mode":{"name":"mode","type":"str","level":"advanced","flags":0,"default_value":"root","min":"","max":"","enum_allowed":["cephadm-package","root"],"desc":"mode for remote execution of cephadm","long_desc":"","tags":[],"see_also":[]},"oob_default_addr":{"name":"oob_default_addr","type":"str","level":"advanced","flags":0,"default_value":"169.254.1.1","min":"","max":"","enum_allowed":[],"desc":"Default address for RedFish API (oob management).","long_desc":"","tags":[],"see_also":[]},"prometheus_alerts_path":{"name":"prometheus_alerts_path","type":"str","level":"advanced","flags":0,"default_value":"/etc/prometheus/ceph/ceph_default_alerts.yml","min":"","max":"","enum_allowed":[],"desc":"location of alerts to include in prometheus deployments","long_desc":"","tags":[],"see_also":[]},"registry_insecure":{"name":"registry_insecure","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Registry is to be considered insecure (no TLS available). Only for development purposes.","long_desc":"","tags":[],"see_also":[]},"registry_password":{"name":"registry_password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository password. 
Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"registry_url":{"name":"registry_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Registry url for login purposes. This is not the default registry","long_desc":"","tags":[],"see_also":[]},"registry_username":{"name":"registry_username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"Custom repository username. Only used for logging into a registry.","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"service_discovery_port":{"name":"service_discovery_port","type":"int","level":"advanced","flags":0,"default_value":"8765","min":"","max":"","enum_allowed":[],"desc":"cephadm service discovery port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssh_config_file":{"name":"ssh_config_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"customized SSH config file to connect to managed hosts","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_count_max":{"name":"ssh_keepalive_count_max","type":"int","level":"advanced","flags":0,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"How many times ssh connections can fail liveness checks before the host is marked offline","long_desc":"","tags":[],"see_also":[]},"ssh_keepalive_interval":{"name":"ssh_keepalive_interval","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"How often ssh connections are checked for liveness","long_desc":"","tags":[],"see_also":[]},"stray_daemon_check_interval":{"name":"stray_daemon_check_interval","type":"secs","level":"advanced","flags":0,"default_value":"1800","min":"","max":"","enum_allowed":[],"desc":"how frequently cephadm should check for the presence of stray daemons","long_desc":"","tags":[],"see_also":[]},"use_agent":{"name":"use_agent","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Use cephadm agent on each host to gather and send metadata","long_desc":"","tags":[],"see_also":[]},"use_repo_digest":{"name":"use_repo_digest","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Automatically convert image tags to image digest. 
Make sure all daemons use the same image","long_desc":"","tags":[],"see_also":[]},"warn_on_failed_host_check":{"name":"warn_on_failed_host_check","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if the host check fails","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_daemons":{"name":"warn_on_stray_daemons","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected that are not managed by cephadm","long_desc":"","tags":[],"see_also":[]},"warn_on_stray_hosts":{"name":"warn_on_stray_hosts","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"raise a health warning if daemons are detected on a host that is not managed by cephadm","long_desc":"","tags":[],"see_also":[]}}},{"name":"crash","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"retain_interval":{"name":"retain_interval","type":"secs","level":"advanced","flags":1,"default_value":"31536000","min":"","max":"","enum_allowed":[],"desc":"how long to retain crashes before pruning them","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_recent_interval":{"name":"warn_recent_interval","type":"secs","level":"advanced","flags":1,"default_value":"1209600","min":"","max":"","enum_allowed":[],"desc":"time interval in which to warn about recent 
crashes","long_desc":"","tags":[],"see_also":[]}}},{"name":"dashboard","can_run":true,"error_string":"","module_options":{"ACCOUNT_LOCKOUT_ATTEMPTS":{"name":"ACCOUNT_LOCKOUT_ATTEMPTS","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_HOST":{"name":"ALERTMANAGER_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ALERTMANAGER_API_SSL_VERIFY":{"name":"ALERTMANAGER_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_ENABLED":{"name":"AUDIT_API_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"AUDIT_API_LOG_PAYLOAD":{"name":"AUDIT_API_LOG_PAYLOAD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ENABLE_BROWSABLE_API":{"name":"ENABLE_BROWSABLE_API","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_CEPHFS":{"name":"FEATURE_TOGGLE_CEPHFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_DASHBOARD":{"name":"FEATURE_TOGGLE_DASHBOARD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_ISCSI":{"name":"FEATURE_TOGGLE_ISCSI","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_MIRRORING":{"name":"FEATURE_TOGGLE_MIRRORING","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_NFS":{"name":"FEATURE_TOGGLE_NFS","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RBD":{"name":"FEATURE_TOGGLE_RBD","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"FEATURE_TOGGLE_RGW":{"name":"FEATURE_TOGGLE_RGW","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE":{"name":"GANESHA_CLUSTERS_RADOS_POOL_NAMESPACE","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_PASSWORD":{"name":"GRAFANA_API_PASSWORD","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_SSL_VERIFY":{"name":"GRAFANA_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_URL":{"name":"GRAFANA_API_URL","type":"str","level":"advanced","flags":0
,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_API_USERNAME":{"name":"GRAFANA_API_USERNAME","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_FRONTEND_API_URL":{"name":"GRAFANA_FRONTEND_API_URL","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"GRAFANA_UPDATE_DASHBOARDS":{"name":"GRAFANA_UPDATE_DASHBOARDS","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISCSI_API_SSL_VERIFICATION":{"name":"ISCSI_API_SSL_VERIFICATION","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ISSUE_TRACKER_API_KEY":{"name":"ISSUE_TRACKER_API_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"MANAGED_BY_CLUSTERS":{"name":"MANAGED_BY_CLUSTERS","type":"str","level":"advanced","flags":0,"default_value":"[]","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"MULTICLUSTER_CONFIG":{"name":"MULTICLUSTER_CONFIG","type":"str","level":"advanced","flags":0,"default_value":"{}","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_HOST":{"name":"PROMETHEUS_API_HOST","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PROMETHEUS_API_SSL_VERIFY":{"name":"PROMETHEUS_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_COMPLEXITY_ENABLED":{"name":"PWD_POLICY_CHECK_COMPLEXITY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED":{"name":"PWD_POLICY_CHECK_EXCLUSION_LIST_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_LENGTH_ENABLED":{"name":"PWD_POLICY_CHECK_LENGTH_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_OLDPWD_ENABLED":{"name":"PWD_POLICY_CHECK_OLDPWD_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_REPETITIVE_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED":{"name":"PWD_POLICY_CHECK_SEQUENTIAL_CHARS_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_CHECK_USERNAME_ENABLED":{"name":"PWD_POLICY_CHECK_USERNAME_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"Fal
se","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_ENABLED":{"name":"PWD_POLICY_ENABLED","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_EXCLUSION_LIST":{"name":"PWD_POLICY_EXCLUSION_LIST","type":"str","level":"advanced","flags":0,"default_value":"osd,host,dashboard,pool,block,nfs,ceph,monitors,gateway,logs,crush,maps","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_COMPLEXITY":{"name":"PWD_POLICY_MIN_COMPLEXITY","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"PWD_POLICY_MIN_LENGTH":{"name":"PWD_POLICY_MIN_LENGTH","type":"int","level":"advanced","flags":0,"default_value":"8","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"REST_REQUESTS_TIMEOUT":{"name":"REST_REQUESTS_TIMEOUT","type":"int","level":"advanced","flags":0,"default_value":"45","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ACCESS_KEY":{"name":"RGW_API_ACCESS_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_ADMIN_RESOURCE":{"name":"RGW_API_ADMIN_RESOURCE","type":"str","level":"advanced","flags":0,"default_value":"admin","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SECRET_KEY":{"name":"RGW_API_SECRET_KEY","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_API_SSL_VERIFY":{"name":"RGW_API_SSL_VERIFY","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"RGW_HOSTNAME_PER_DAEMON":{"name":"RGW_HOSTNAME_PER_DAEMON","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"UNSAFE_TLS_v1_2":{"name":"UNSAFE_TLS_v1_2","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_SPAN":{"name":"USER_PWD_EXPIRATION_SPAN","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_1":{"name":"USER_PWD_EXPIRATION_WARNING_1","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"USER_PWD_EXPIRATION_WARNING_2":{"name":"USER_PWD_EXPIRATION_WARNING_2","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"cross_origin_url":{"name":"cross_origin_url","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"crt_file":{"name":"crt_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"debug":{"name":"debug","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"
desc":"Enable/disable debug options","long_desc":"","tags":[],"see_also":[]},"jwt_token_ttl":{"name":"jwt_token_ttl","type":"int","level":"advanced","flags":0,"default_value":"28800","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"key_file":{"name":"key_file","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"motd":{"name":"motd","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"The message of the day","long_desc":"","tags":[],"see_also":[]},"redirect_resolve_ip_addr":{"name":"redirect_resolve_ip_addr","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":0,"default_value":"8080","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl_server_port":{"name":"ssl_server_port","type":"int","level":"advanced","flags":0,"default_value":"8443","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sso_oauth2":{"name":"sso_oauth2","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":0,"default_value":"redirect","min":"","max":"","enum_allowed":["error","redirect"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":0,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url_prefix":{"name":"url_prefix","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"devicehealth","can_run":true,"error_string":"","module_options":{"enable_monitoring":{"name":
"enable_monitoring","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"monitor device health metrics","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mark_out_threshold":{"name":"mark_out_threshold","type":"secs","level":"advanced","flags":1,"default_value":"2419200","min":"","max":"","enum_allowed":[],"desc":"automatically mark OSD if it may fail before this long","long_desc":"","tags":[],"see_also":[]},"pool_name":{"name":"pool_name","type":"str","level":"advanced","flags":1,"default_value":"device_health_metrics","min":"","max":"","enum_allowed":[],"desc":"name of pool in which to store device health metrics","long_desc":"","tags":[],"see_also":[]},"retention_period":{"name":"retention_period","type":"secs","level":"advanced","flags":1,"default_value":"15552000","min":"","max":"","enum_allowed":[],"desc":"how long to retain device health metrics","long_desc":"","tags":[],"see_also":[]},"scrape_frequency":{"name":"scrape_frequency","type":"secs","level":"advanced","flags":1,"default_value":"86400","min":"","max":"","enum_allowed":[],"desc":"how frequently to scrape device health metrics","long_desc":"","tags":[],"see_also":[]},"self_heal":{"name":"self_heal","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"preemptively heal cluster around devices that may fail","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"600","min":"","max":"","enum_allowed":[],"desc":"how frequently to wake up and check device health","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"warn_threshold":{"name":"warn_threshold","type":"secs","level":"advanced","flags":1,"default_value":"7257600","min":"","max":"","enum_allowed":[],"desc":"raise health warning if OSD may fail before this long","long_desc":"","tags":[],"see_also":[]}}},{"name":"influx","can_run":false,"error_string":"influxdb python module not found","module_options":{"batch_size":{"name":"batch_size","type":"int","level":"advanced","flags":0,"default_value":"5000","min":"","max":"","enum_allowed":[],"desc":"How big batches of data points should be when sending to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"database":{"name":"database","type":"str","level":"advanced","flags":0,"default_value":"ceph","min":"","max":"","enum_allowed":[],"desc":"InfluxDB database name. 
You will need to create this database and grant write privileges to the configured username or the username must have admin privileges to create it.","long_desc":"","tags":[],"see_also":[]},"hostname":{"name":"hostname","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server hostname","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"30","min":"5","max":"","enum_allowed":[],"desc":"Time between reports to InfluxDB. Default 30 seconds.","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"password":{"name":"password","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"password of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"port":{"name":"port","type":"int","level":"advanced","flags":0,"default_value":"8086","min":"","max":"","enum_allowed":[],"desc":"InfluxDB server port","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"ssl":{"name":"ssl","type":"str","level":"advanced","flags":0,"default_value":"false","min":"","max":"","enum_allowed":[],"desc":"Use https connection for InfluxDB server. Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]},"threads":{"name":"threads","type":"int","level":"advanced","flags":0,"default_value":"5","min":"1","max":"32","enum_allowed":[],"desc":"How many worker threads should be spawned for sending data to InfluxDB.","long_desc":"","tags":[],"see_also":[]},"username":{"name":"username","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"username of InfluxDB server user","long_desc":"","tags":[],"see_also":[]},"verify_ssl":{"name":"verify_ssl","type":"str","level":"advanced","flags":0,"default_value":"true","min":"","max":"","enum_allowed":[],"desc":"Verify https cert for InfluxDB server. 
Use \"true\" or \"false\".","long_desc":"","tags":[],"see_also":[]}}},{"name":"insights","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"iostat","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"k8sevents","can_run":true,"error_string":"","module_options":{"ceph_event_retention_days":{"name":"ceph_event_retention_days","type":"int","level":"advanced","flags":0,"default_value":"7","min":"","max":"","enum_allowed":[],"desc":"Days to hold ceph event information within local cache","long_desc":"","tags":[],"see_also":[]},"config_check_secs":{"name":"config_check_secs","type":"int","level":"advanced","flags":0,"default_value":"10","min":"10","max":"","enum_allowed":[],"desc":"interval (secs) to check for cluster configuration 
changes","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"localpool","can_run":true,"error_string":"","module_options":{"failure_domain":{"name":"failure_domain","type":"str","level":"advanced","flags":1,"default_value":"host","min":"","max":"","enum_allowed":[],"desc":"failure domain for any created local pool","long_desc":"what failure domain we should separate data replicas across.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"min_size":{"name":"min_size","type":"int","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"default min_size for any created local pool","long_desc":"value to set min_size to (unchanged from Ceph's default if this option is not set)","tags":[],"see_also":[]},"num_rep":{"name":"num_rep","type":"int","level":"advanced","flags":1,"default_value":"3","min":"","max":"","enum_allowed":[],"desc":"default replica count for any created local pool","long_desc":"","tags":[],"see_also":[]},"pg_num":{"name":"pg_num","type":"int","level":"advanced","flags":1,"default_value":"128","min":"","max":"","enum_allowed":[],"desc":"default pg_num for any created local pool","long_desc":"","tags":[],"see_also":[]},"prefix":{"name":"prefix","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"name prefix for any created local 
pool","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"subtree":{"name":"subtree","type":"str","level":"advanced","flags":1,"default_value":"rack","min":"","max":"","enum_allowed":[],"desc":"CRUSH level for which to create a local pool","long_desc":"which CRUSH subtree type the module should create a pool for.","tags":[],"see_also":[]}}},{"name":"mds_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"mirroring","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"nfs","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":
"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"orchestrator","can_run":true,"error_string":"","module_options":{"fail_fs":{"name":"fail_fs","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Fail filesystem for rapid multi-rank mds upgrade","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"orchestrator":{"name":"orchestrator","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["cephadm","rook","test_orchestrator"],"desc":"Orchestrator backend","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_perf_query","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"osd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[
]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"pg_autoscaler","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":0,"default_value":"60","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"threshold":{"name":"threshold","type":"float","level":"advanced","flags":0,"default_value":"3.0","min":"1.0","max":"","enum_allowed":[],"desc":"scaling threshold","long_desc":"The factor by which the `NEW PG_NUM` must vary from the current`PG_NUM` before being accepted. 
Cannot be less than 1.0","tags":[],"see_also":[]}}},{"name":"progress","can_run":true,"error_string":"","module_options":{"allow_pg_recovery_event":{"name":"allow_pg_recovery_event","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow the module to show pg recovery progress","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_completed_events":{"name":"max_completed_events","type":"int","level":"advanced","flags":1,"default_value":"50","min":"","max":"","enum_allowed":[],"desc":"number of past completed events to remember","long_desc":"","tags":[],"see_also":[]},"sleep_interval":{"name":"sleep_interval","type":"secs","level":"advanced","flags":1,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"how long the module is going to sleep","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"prometheus","can_run":true,"error_string":"","module_options":{"cache":{"name":"cache","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"exclude_perf_counters":{"name":"exclude_perf_counters","type":"bool","level":"advanced","flags":1,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Do not include perf-counters in the metrics output","long_desc":"Gathering perf-counters from a single Prometheus exporter can degrade ceph-mgr performance, especially in large clusters. Instead, Ceph-exporter daemons are now used by default for perf-counter gathering. 
This should only be disabled when no ceph-exporters are deployed.","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools":{"name":"rbd_stats_pools","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rbd_stats_pools_refresh_interval":{"name":"rbd_stats_pools_refresh_interval","type":"int","level":"advanced","flags":0,"default_value":"300","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"scrape_interval":{"name":"scrape_interval","type":"float","level":"advanced","flags":0,"default_value":"15.0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"server_addr":{"name":"server_addr","type":"str","level":"advanced","flags":0,"default_value":"::","min":"","max":"","enum_allowed":[],"desc":"the IPv4 or IPv6 address on which the module listens for HTTP requests","long_desc":"","tags":[],"see_also":[]},"server_port":{"name":"server_port","type":"int","level":"advanced","flags":1,"default_value":"9283","min":"","max":"","enum_allowed":[],"desc":"the port on which the module listens for HTTP 
requests","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"stale_cache_strategy":{"name":"stale_cache_strategy","type":"str","level":"advanced","flags":0,"default_value":"log","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_behaviour":{"name":"standby_behaviour","type":"str","level":"advanced","flags":1,"default_value":"default","min":"","max":"","enum_allowed":["default","error"],"desc":"","long_desc":"","tags":[],"see_also":[]},"standby_error_status_code":{"name":"standby_error_status_code","type":"int","level":"advanced","flags":1,"default_value":"500","min":"400","max":"599","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rbd_support","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_snap_create":{"name":"max_concurrent_snap_create","type":"int","level":"advanced","flags":0,"default_value":"10","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"mirror_snapshot_schedule":{"name":"mirror_snapshot_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"trash_purge_schedule":{"name":"trash_purge_schedule","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rgw","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_als
o":[]},"secondary_zone_period_retry_limit":{"name":"secondary_zone_period_retry_limit","type":"int","level":"advanced","flags":0,"default_value":"5","min":"","max":"","enum_allowed":[],"desc":"RGW module period update retry limit for secondary site","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"rook","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"prometheus_tls_secret_name":{"name":"prometheus_tls_secret_name","type":"str","level":"advanced","flags":0,"default_value":"rook-ceph-prometheus-server-tls","min":"","max":"","enum_allowed":[],"desc":"name of tls secret in k8s for prometheus","long_desc":"","tags":[],"see_also":[]},"secure_monitoring_stack":{"name":"secure_monitoring_stack","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Enable TLS security for all the monitoring stack daemons","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"storage_class":{"name":"storage_class","type":"str","level":"advanced","flags":0,"default_value":"local","min":"","max":"","enum_allowed":[],"desc":"storage class name for LSO-discovered 
PVs","long_desc":"","tags":[],"see_also":[]}}},{"name":"selftest","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption1":{"name":"roption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"roption2":{"name":"roption2","type":"str","level":"advanced","flags":0,"default_value":"xyz","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption1":{"name":"rwoption1","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption2":{"name":"rwoption2","type":"int","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption3":{"name":"rwoption3","type":"float","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption4":{"name":"rwoption4","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption5":{"name":"rwoption5","type":"bool","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption6":{"name":"rwoption6","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"rwoption7":{"name":"rwoption7","type":"int","level":"advanced","flags":0,"default_value":"","min":"1","max":"42","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testkey":{"name":"testkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testlkey":{"name":"testlkey","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"testnewline":{"name":"testnewline","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"smb","can_run":true,"error_string":"","module_options":{"internal_store_backend":{"name":"internal_store_backend","type":"str","level":"dev","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"set 
internal store backend. for development and testing only","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"update_orchestration":{"name":"update_orchestration","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"automatically update orchestration when smb resources are changed","long_desc":"","tags":[],"see_also":[]}}},{"name":"snap_schedule","can_run":true,"error_string":"","module_options":{"allow_m_granularity":{"name":"allow_m_granularity","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"allow minute scheduled snapshots","long_desc":"","tags":[],"see_also":[]},"dump_on_update":{"name":"dump_on_update","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"dump database to debug log on 
update","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"stats","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"status","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telegraf","can_run":true,"error_string":"","module_options":{"address":{"name":"address","type":"str","level":"advanced","flags":0,"default_value":"unixgram:///tmp/telegraf.sock","min":"","max":"","enum_a
llowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"secs","level":"advanced","flags":0,"default_value":"15","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"telemetry","can_run":true,"error_string":"","module_options":{"channel_basic":{"name":"channel_basic","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share basic cluster information (size, version)","long_desc":"","tags":[],"see_also":[]},"channel_crash":{"name":"channel_crash","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share metadata about Ceph daemon crashes (version, stack straces, etc)","long_desc":"","tags":[],"see_also":[]},"channel_device":{"name":"channel_device","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Share device health metrics (e.g., SMART data, minus potentially identifying info like serial numbers)","long_desc":"","tags":[],"see_also":[]},"channel_ident":{"name":"channel_ident","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share a user-provided description and/or contact email for the cluster","long_desc":"","tags":[],"see_also":[]},"channel_perf":{"name":"channel_perf","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Share various performance metrics of a 
cluster","long_desc":"","tags":[],"see_also":[]},"contact":{"name":"contact","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"description":{"name":"description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"device_url":{"name":"device_url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/device","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"enabled":{"name":"enabled","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"interval":{"name":"interval","type":"int","level":"advanced","flags":0,"default_value":"24","min":"8","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"last_opt_revision":{"name":"last_opt_revision","type":"int","level":"advanced","flags":0,"default_value":"1","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard":{"name":"leaderboard","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"leaderboard_description":{"name":"leaderboard_description","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"organization":{"name":"organization","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"proxy":{"name":"proxy","type":"str","level":"advanced","flags":0,"default_value":"","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"url":{"name":"url","type":"str","level":"advanced","flags":0,"default_value":"https://telemetry.ceph.com/report","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"test_orchestrator","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"adv
anced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}},{"name":"volumes","can_run":true,"error_string":"","module_options":{"log_level":{"name":"log_level","type":"str","level":"advanced","flags":1,"default_value":"","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster":{"name":"log_to_cluster","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_cluster_level":{"name":"log_to_cluster_level","type":"str","level":"advanced","flags":1,"default_value":"info","min":"","max":"","enum_allowed":["","critical","debug","error","info","warning"],"desc":"","long_desc":"","tags":[],"see_also":[]},"log_to_file":{"name":"log_to_file","type":"bool","level":"advanced","flags":1,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]},"max_concurrent_clones":{"name":"max_concurrent_clones","type":"int","level":"advanced","flags":0,"default_value":"4","min":"","max":"","enum_allowed":[],"desc":"Number of asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"pause_cloning":{"name":"pause_cloning","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Pause asynchronous cloner threads","long_desc":"","tags":[],"see_also":[]},"pause_purging":{"name":"pause_purging","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Pause asynchronous subvolume purge threads","long_desc":"","tags":[],"see_also":[]},"periodic_async_work":{"name":"periodic_async_work","type":"bool","level":"advanced","flags":0,"default_value":"False","min":"","max":"","enum_allowed":[],"desc":"Periodically check for async work","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_delay":{"name":"snapshot_clone_delay","type":"int","level":"advanced","flags":0,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"Delay clone begin operation by snapshot_clone_delay seconds","long_desc":"","tags":[],"see_also":[]},"snapshot_clone_no_wait":{"name":"snapshot_clone_no_wait","type":"bool","level":"advanced","flags":0,"default_value":"True","min":"","max":"","enum_allowed":[],"desc":"Reject subvolume clone request when cloner threads are 
busy","long_desc":"","tags":[],"see_also":[]},"sqlite3_killpoint":{"name":"sqlite3_killpoint","type":"int","level":"dev","flags":1,"default_value":"0","min":"","max":"","enum_allowed":[],"desc":"","long_desc":"","tags":[],"see_also":[]}}}],"services":{"dashboard":"https://192.168.123.106:8443/","prometheus":"http://192.168.123.106:9283/"},"always_on_modules":{"octopus":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"pacific":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"quincy":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"reef":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"squid":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"],"tentacle":["balancer","crash","devicehealth","orchestrator","pg_autoscaler","progress","rbd_support","status","telemetry","volumes"]},"force_disabled_modules":{},"last_failure_osd_epoch":5,"active_clients":[{"name":"devicehealth","addrvec":[{"type":"v2","addr":"192.168.123.106:0","nonce":288007894}]},{"name":"libcephsqlite","addrvec":[{"type":"v2","addr":"192.168.123.106:0","nonce":1780413912}]},{"name":"rbd_support","addrvec":[{"type":"v2","addr":"192.168.123.106:0","nonce":2278080462}]},{"name":"volumes","addrvec":[{"type":"v2","addr":"192.168.123.106:0","nonce":1957478106}]}]} 2026-04-15T13:35:04.102 INFO:tasks.cephadm.ceph_manager.ceph:mgr available! 2026-04-15T13:35:04.102 INFO:tasks.cephadm.ceph_manager.ceph:waiting for all up 2026-04-15T13:35:04.102 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd dump --format=json 2026-04-15T13:35:04.352 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: cephadm 2026-04-15T13:35:03.045059+0000 mgr.vm06.qbbldl (mgr.14229) 87 : cephadm [INF] Detected new or changed devices on vm09 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: cephadm 2026-04-15T13:35:03.045059+0000 mgr.vm06.qbbldl (mgr.14229) 87 : cephadm [INF] Detected new or changed devices on vm09 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.059397+0000 mon.vm06 (mon.0) 639 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.059397+0000 mon.vm06 (mon.0) 639 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: cluster 2026-04-15T13:35:03.060070+0000 mon.vm06 (mon.0) 640 : cluster [DBG] mgrmap e19: vm06.qbbldl(active, since 54s), standbys: vm09.kpawde 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: cluster 2026-04-15T13:35:03.060070+0000 mon.vm06 (mon.0) 640 : cluster [DBG] mgrmap e19: 
vm06.qbbldl(active, since 54s), standbys: vm09.kpawde 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.066286+0000 mon.vm06 (mon.0) 641 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.066286+0000 mon.vm06 (mon.0) 641 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.067065+0000 mon.vm06 (mon.0) 642 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"} : dispatch 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.067065+0000 mon.vm06 (mon.0) 642 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"} : dispatch 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.259425+0000 mon.vm06 (mon.0) 643 : audit [DBG] from='client.? 192.168.123.106:0/1474892284' entity='client.admin' cmd={"prefix": "config log", "num": 1, "format": "json"} : dispatch 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.259425+0000 mon.vm06 (mon.0) 643 : audit [DBG] from='client.? 192.168.123.106:0/1474892284' entity='client.admin' cmd={"prefix": "config log", "num": 1, "format": "json"} : dispatch 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.833392+0000 mon.vm06 (mon.0) 644 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.833392+0000 mon.vm06 (mon.0) 644 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.838509+0000 mon.vm06 (mon.0) 645 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.838509+0000 mon.vm06 (mon.0) 645 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.839625+0000 mon.vm06 (mon.0) 646 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"} : dispatch 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.839625+0000 mon.vm06 (mon.0) 646 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"} : dispatch 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 
13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.840369+0000 mon.vm06 (mon.0) 647 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.840369+0000 mon.vm06 (mon.0) 647 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.840900+0000 mon.vm06 (mon.0) 648 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.840900+0000 mon.vm06 (mon.0) 648 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.845015+0000 mon.vm06 (mon.0) 649 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.845015+0000 mon.vm06 (mon.0) 649 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.847307+0000 mon.vm06 (mon.0) 650 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:03.847307+0000 mon.vm06 (mon.0) 650 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:04.030290+0000 mon.vm09 (mon.1) 26 : audit [DBG] from='client.? 192.168.123.106:0/1213526700' entity='client.admin' cmd={"prefix": "mgr dump", "format": "json"} : dispatch 2026-04-15T13:35:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:04 vm09 bash[34466]: audit 2026-04-15T13:35:04.030290+0000 mon.vm09 (mon.1) 26 : audit [DBG] from='client.? 
192.168.123.106:0/1213526700' entity='client.admin' cmd={"prefix": "mgr dump", "format": "json"} : dispatch 2026-04-15T13:35:04.372 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: cephadm 2026-04-15T13:35:03.045059+0000 mgr.vm06.qbbldl (mgr.14229) 87 : cephadm [INF] Detected new or changed devices on vm09 2026-04-15T13:35:04.372 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: cephadm 2026-04-15T13:35:03.045059+0000 mgr.vm06.qbbldl (mgr.14229) 87 : cephadm [INF] Detected new or changed devices on vm09 2026-04-15T13:35:04.372 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.059397+0000 mon.vm06 (mon.0) 639 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.059397+0000 mon.vm06 (mon.0) 639 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: cluster 2026-04-15T13:35:03.060070+0000 mon.vm06 (mon.0) 640 : cluster [DBG] mgrmap e19: vm06.qbbldl(active, since 54s), standbys: vm09.kpawde 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: cluster 2026-04-15T13:35:03.060070+0000 mon.vm06 (mon.0) 640 : cluster [DBG] mgrmap e19: vm06.qbbldl(active, since 54s), standbys: vm09.kpawde 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.066286+0000 mon.vm06 (mon.0) 641 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.066286+0000 mon.vm06 (mon.0) 641 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.067065+0000 mon.vm06 (mon.0) 642 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"} : dispatch 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.067065+0000 mon.vm06 (mon.0) 642 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"} : dispatch 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.259425+0000 mon.vm06 (mon.0) 643 : audit [DBG] from='client.? 192.168.123.106:0/1474892284' entity='client.admin' cmd={"prefix": "config log", "num": 1, "format": "json"} : dispatch 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.259425+0000 mon.vm06 (mon.0) 643 : audit [DBG] from='client.? 
192.168.123.106:0/1474892284' entity='client.admin' cmd={"prefix": "config log", "num": 1, "format": "json"} : dispatch 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.833392+0000 mon.vm06 (mon.0) 644 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.833392+0000 mon.vm06 (mon.0) 644 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.838509+0000 mon.vm06 (mon.0) 645 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.838509+0000 mon.vm06 (mon.0) 645 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.839625+0000 mon.vm06 (mon.0) 646 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"} : dispatch 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.839625+0000 mon.vm06 (mon.0) 646 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"} : dispatch 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.840369+0000 mon.vm06 (mon.0) 647 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.840369+0000 mon.vm06 (mon.0) 647 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.840900+0000 mon.vm06 (mon.0) 648 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.840900+0000 mon.vm06 (mon.0) 648 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.845015+0000 mon.vm06 (mon.0) 649 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.845015+0000 mon.vm06 (mon.0) 649 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 
13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.847307+0000 mon.vm06 (mon.0) 650 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:03.847307+0000 mon.vm06 (mon.0) 650 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:04.030290+0000 mon.vm09 (mon.1) 26 : audit [DBG] from='client.? 192.168.123.106:0/1213526700' entity='client.admin' cmd={"prefix": "mgr dump", "format": "json"} : dispatch 2026-04-15T13:35:04.373 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:04 vm06 bash[28114]: audit 2026-04-15T13:35:04.030290+0000 mon.vm09 (mon.1) 26 : audit [DBG] from='client.? 192.168.123.106:0/1213526700' entity='client.admin' cmd={"prefix": "mgr dump", "format": "json"} : dispatch 2026-04-15T13:35:04.731 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:35:04.731 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":24,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","created":"2026-04-15T13:32:40.722525+0000","modified":"2026-04-15T13:35:02.041861+0000","last_up_change":"2026-04-15T13:34:59.022547+0000","last_in_change":"2026-04-15T13:34:43.989611+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":9,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"tentacle","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-04-15T13:34:56.455012+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"24","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"nonprimary_shards":"{}","option
s":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"84143048-0065-4d27-be49-d79e8113f54d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":21,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6800","nonce":1671722834},{"type":"v1","addr":"192.168.123.109:6801","nonce":1671722834}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6802","nonce":1671722834},{"type":"v1","addr":"192.168.123.109:6803","nonce":1671722834}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6806","nonce":1671722834},{"type":"v1","addr":"192.168.123.109:6807","nonce":1671722834}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6804","nonce":1671722834},{"type":"v1","addr":"192.168.123.109:6805","nonce":1671722834}]},"public_addr":"192.168.123.109:6801/1671722834","cluster_addr":"192.168.123.109:6803/1671722834","heartbeat_back_addr":"192.168.123.109:6807/1671722834","heartbeat_front_addr":"192.168.123.109:6805/1671722834","state":["exists","up"]},{"osd":1,"uuid":"ef108fe4-72aa-4087-a097-576f30c554c6","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":19,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6802","nonce":462013235},{"type":"v1","addr":"192.168.123.106:6803","nonce":462013235}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6804","nonce":462013235},{"type":"v1","addr":"192.168.123.106:6805","nonce":462013235}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6808","nonce":462013235},{"type":"v1","addr":"192.168.123.106:6809","nonce":462013235}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6806","nonce":462013235},{"type":"v1","addr":"192.168.123.106:6807","nonce":462013235}]},"public_addr":"192.168.123.106:6803/462013235","cluster_addr":"192.168.123.106:6805/462013235","heartbeat_back_addr":"192.168.123.106:6809/462013235","heartbeat_front_addr":"192.168.123.106:6807/462013235","state":["exists","up"]},{"osd":2,"uuid":"549138af-e58f-4394-97f4-7a8c79a44f47","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6808","nonce":1579250882},{"type":"v1","addr":"192.168.123.109:6809","nonce":1579250882}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6810","nonce":1579250882},{"type":"v1","addr":"192.168.123.109:6811","nonce":1579250882}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6814","nonce":1579250882},{"type":"v1","addr":"192.168.123.109:6815","nonce":1579250882}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6812","nonce":1579250882},{"type":"v1","addr":"192.168.123.109:6813","nonce":1579250882}]},"public_addr":"192.168.123.109:6809/1579250882","cluster_addr":"192.168.123.109:6811/1579250882","heartbeat_back_addr":"192.168.123.109:6815/1579250882","heartbeat_front_addr":"192.168.123.109:6813/1579250882","state":["exists","up"]},{"o
sd":3,"uuid":"dd7c0f1b-de8e-46ef-adf1-1743157ee826","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":18,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6810","nonce":2627160607},{"type":"v1","addr":"192.168.123.106:6811","nonce":2627160607}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6812","nonce":2627160607},{"type":"v1","addr":"192.168.123.106:6813","nonce":2627160607}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6816","nonce":2627160607},{"type":"v1","addr":"192.168.123.106:6817","nonce":2627160607}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6814","nonce":2627160607},{"type":"v1","addr":"192.168.123.106:6815","nonce":2627160607}]},"public_addr":"192.168.123.106:6811/2627160607","cluster_addr":"192.168.123.106:6813/2627160607","heartbeat_back_addr":"192.168.123.106:6817/2627160607","heartbeat_front_addr":"192.168.123.106:6815/2627160607","state":["exists","up"]},{"osd":4,"uuid":"9acb1f73-ff77-43b9-839d-40df5a4f00f9","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6816","nonce":489839388},{"type":"v1","addr":"192.168.123.109:6817","nonce":489839388}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6818","nonce":489839388},{"type":"v1","addr":"192.168.123.109:6819","nonce":489839388}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6822","nonce":489839388},{"type":"v1","addr":"192.168.123.109:6823","nonce":489839388}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6820","nonce":489839388},{"type":"v1","addr":"192.168.123.109:6821","nonce":489839388}]},"public_addr":"192.168.123.109:6817/489839388","cluster_addr":"192.168.123.109:6819/489839388","heartbeat_back_addr":"192.168.123.109:6823/489839388","heartbeat_front_addr":"192.168.123.109:6821/489839388","state":["exists","up"]},{"osd":5,"uuid":"0c8f1316-78b2-4ecc-a5cc-29a4a232978f","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":20,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6818","nonce":3054582361},{"type":"v1","addr":"192.168.123.106:6819","nonce":3054582361}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6820","nonce":3054582361},{"type":"v1","addr":"192.168.123.106:6821","nonce":3054582361}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6824","nonce":3054582361},{"type":"v1","addr":"192.168.123.106:6825","nonce":3054582361}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6822","nonce":3054582361},{"type":"v1","addr":"192.168.123.106:6823","nonce":3054582361}]},"public_addr":"192.168.123.106:6819/3054582361","cluster_addr":"192.168.123.106:6821/3054582361","heartbeat_back_addr":"192.168.123.106:6825/3054582361","heartbeat_front_addr":"192.168.123.106:6823/3054582361","state":["exists","up"]},{"osd":6,"uuid":"1eeffe1e-e81f-4304-8758-28d246c7bbc1","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6824","nonce":2064572425},{"type":"v1","addr":"192.168.123.109:6825","nonce":2064572425}]},"cluster_addrs":{"addrvec":[{"type":
"v2","addr":"192.168.123.109:6826","nonce":2064572425},{"type":"v1","addr":"192.168.123.109:6827","nonce":2064572425}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6830","nonce":2064572425},{"type":"v1","addr":"192.168.123.109:6831","nonce":2064572425}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6828","nonce":2064572425},{"type":"v1","addr":"192.168.123.109:6829","nonce":2064572425}]},"public_addr":"192.168.123.109:6825/2064572425","cluster_addr":"192.168.123.109:6827/2064572425","heartbeat_back_addr":"192.168.123.109:6831/2064572425","heartbeat_front_addr":"192.168.123.109:6829/2064572425","state":["exists","up"]},{"osd":7,"uuid":"2eefc3e2-2c3a-4ac3-846b-ab224d110bd8","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":22,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6826","nonce":1155449015},{"type":"v1","addr":"192.168.123.106:6827","nonce":1155449015}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6828","nonce":1155449015},{"type":"v1","addr":"192.168.123.106:6829","nonce":1155449015}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6832","nonce":1155449015},{"type":"v1","addr":"192.168.123.106:6833","nonce":1155449015}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6830","nonce":1155449015},{"type":"v1","addr":"192.168.123.106:6831","nonce":1155449015}]},"public_addr":"192.168.123.106:6827/1155449015","cluster_addr":"192.168.123.106:6829/1155449015","heartbeat_back_addr":"192.168.123.106:6833/1155449015","heartbeat_front_addr":"192.168.123.106:6831/1155449015","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:52.338458+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:52.946112+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:53.702956+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:54.530210+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:55.672155+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:56.522391+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:57.133917+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:57.830615+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.106:0/1794388721":"2026-04-16T13:34:08.155627+0000","192.168.123.106:6800/3431857196":"2026-04-16T13:33:08.4996
83+0000","192.168.123.106:6801/235423043":"2026-04-16T13:33:29.098096+0000","192.168.123.106:6801/3431857196":"2026-04-16T13:33:08.499683+0000","192.168.123.106:0/517616836":"2026-04-16T13:34:08.155627+0000","192.168.123.106:0/1351629426":"2026-04-16T13:33:08.499683+0000","192.168.123.106:0/4178947106":"2026-04-16T13:33:08.499683+0000","192.168.123.106:0/881279385":"2026-04-16T13:33:08.499683+0000","192.168.123.106:0/3417962058":"2026-04-16T13:34:08.155627+0000","192.168.123.106:0/901355294":"2026-04-16T13:33:29.098096+0000","192.168.123.106:6800/235423043":"2026-04-16T13:33:29.098096+0000","192.168.123.106:0/3221750867":"2026-04-16T13:33:29.098096+0000","192.168.123.106:0/2993942436":"2026-04-16T13:33:29.098096+0000","192.168.123.106:6800/1064036141":"2026-04-16T13:34:08.155627+0000","192.168.123.106:6801/1064036141":"2026-04-16T13:34:08.155627+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"isa","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-04-15T13:35:04.795 INFO:tasks.cephadm.ceph_manager.ceph:all up! 2026-04-15T13:35:04.795 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd dump --format=json 2026-04-15T13:35:05.074 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:05.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:05 vm09 bash[34466]: cephadm 2026-04-15T13:35:03.826983+0000 mgr.vm06.qbbldl (mgr.14229) 88 : cephadm [INF] Detected new or changed devices on vm06 2026-04-15T13:35:05.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:05 vm09 bash[34466]: cephadm 2026-04-15T13:35:03.826983+0000 mgr.vm06.qbbldl (mgr.14229) 88 : cephadm [INF] Detected new or changed devices on vm06 2026-04-15T13:35:05.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:05 vm09 bash[34466]: cluster 2026-04-15T13:35:04.170049+0000 mgr.vm06.qbbldl (mgr.14229) 89 : cluster [DBG] pgmap v40: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:05.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:05 vm09 bash[34466]: cluster 2026-04-15T13:35:04.170049+0000 mgr.vm06.qbbldl (mgr.14229) 89 : cluster [DBG] pgmap v40: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:05.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:05 vm09 bash[34466]: audit 2026-04-15T13:35:04.732196+0000 mon.vm06 (mon.0) 651 : audit [DBG] from='client.? 192.168.123.106:0/1986669185' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-15T13:35:05.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:05 vm09 bash[34466]: audit 2026-04-15T13:35:04.732196+0000 mon.vm06 (mon.0) 651 : audit [DBG] from='client.? 
192.168.123.106:0/1986669185' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-15T13:35:05.469 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:35:05.469 INFO:teuthology.orchestra.run.vm06.stdout:{"epoch":24,"fsid":"75e42418-38cf-11f1-9300-4fe77ac4445b","created":"2026-04-15T13:32:40.722525+0000","modified":"2026-04-15T13:35:02.041861+0000","last_up_change":"2026-04-15T13:34:59.022547+0000","last_in_change":"2026-04-15T13:34:43.989611+0000","flags":"sortbitwise,recovery_deletes,purged_snapdirs,pglog_hardlimit","flags_num":5799936,"flags_set":["pglog_hardlimit","purged_snapdirs","recovery_deletes","sortbitwise"],"crush_version":9,"full_ratio":0.94999998807907104,"backfillfull_ratio":0.89999997615814209,"nearfull_ratio":0.85000002384185791,"cluster_snapshot":"","pool_max":1,"max_osd":8,"require_min_compat_client":"luminous","min_compat_client":"jewel","require_osd_release":"tentacle","allow_crimson":false,"pools":[{"pool":1,"pool_name":".mgr","create_time":"2026-04-15T13:34:56.455012+0000","flags":1,"flags_names":"hashpspool","type":1,"size":3,"min_size":2,"crush_rule":0,"peering_crush_bucket_count":0,"peering_crush_bucket_target":0,"peering_crush_bucket_barrier":0,"peering_crush_bucket_mandatory_member":2147483647,"is_stretch_pool":false,"object_hash":2,"pg_autoscale_mode":"off","pg_num":1,"pg_placement_num":1,"pg_placement_num_target":1,"pg_num_target":1,"pg_num_pending":1,"last_pg_merge_meta":{"source_pgid":"0.0","ready_epoch":0,"last_epoch_started":0,"last_epoch_clean":0,"source_version":"0'0","target_version":"0'0"},"last_change":"24","last_force_op_resend":"0","last_force_op_resend_prenautilus":"0","last_force_op_resend_preluminous":"0","auid":0,"snap_mode":"selfmanaged","snap_seq":0,"snap_epoch":0,"pool_snaps":[],"removed_snaps":"[]","quota_max_bytes":0,"quota_max_objects":0,"tiers":[],"tier_of":-1,"read_tier":-1,"write_tier":-1,"cache_mode":"none","target_max_bytes":0,"target_max_objects":0,"cache_target_dirty_ratio_micro":400000,"cache_target_dirty_high_ratio_micro":600000,"cache_target_full_ratio_micro":800000,"cache_min_flush_age":0,"cache_min_evict_age":0,"erasure_code_profile":"","hit_set_params":{"type":"none"},"hit_set_period":0,"hit_set_count":0,"use_gmt_hitset":true,"min_read_recency_for_promote":0,"min_write_recency_for_promote":0,"hit_set_grade_decay_rate":0,"hit_set_search_last_n":0,"grade_table":[],"stripe_width":0,"expected_num_objects":0,"fast_read":false,"nonprimary_shards":"{}","options":{"pg_num_max":32,"pg_num_min":1},"application_metadata":{"mgr":{}},"read_balance":{"score_type":"Fair 
distribution","score_acting":7.8899998664855957,"score_stable":7.8899998664855957,"optimal_score":0.37999999523162842,"raw_score_acting":3,"raw_score_stable":3,"primary_affinity_weighted":1,"average_primary_affinity":1,"average_primary_affinity_weighted":1}}],"osds":[{"osd":0,"uuid":"84143048-0065-4d27-be49-d79e8113f54d","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":21,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6800","nonce":1671722834},{"type":"v1","addr":"192.168.123.109:6801","nonce":1671722834}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6802","nonce":1671722834},{"type":"v1","addr":"192.168.123.109:6803","nonce":1671722834}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6806","nonce":1671722834},{"type":"v1","addr":"192.168.123.109:6807","nonce":1671722834}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6804","nonce":1671722834},{"type":"v1","addr":"192.168.123.109:6805","nonce":1671722834}]},"public_addr":"192.168.123.109:6801/1671722834","cluster_addr":"192.168.123.109:6803/1671722834","heartbeat_back_addr":"192.168.123.109:6807/1671722834","heartbeat_front_addr":"192.168.123.109:6805/1671722834","state":["exists","up"]},{"osd":1,"uuid":"ef108fe4-72aa-4087-a097-576f30c554c6","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":16,"up_thru":19,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6802","nonce":462013235},{"type":"v1","addr":"192.168.123.106:6803","nonce":462013235}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6804","nonce":462013235},{"type":"v1","addr":"192.168.123.106:6805","nonce":462013235}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6808","nonce":462013235},{"type":"v1","addr":"192.168.123.106:6809","nonce":462013235}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6806","nonce":462013235},{"type":"v1","addr":"192.168.123.106:6807","nonce":462013235}]},"public_addr":"192.168.123.106:6803/462013235","cluster_addr":"192.168.123.106:6805/462013235","heartbeat_back_addr":"192.168.123.106:6809/462013235","heartbeat_front_addr":"192.168.123.106:6807/462013235","state":["exists","up"]},{"osd":2,"uuid":"549138af-e58f-4394-97f4-7a8c79a44f47","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":17,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6808","nonce":1579250882},{"type":"v1","addr":"192.168.123.109:6809","nonce":1579250882}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6810","nonce":1579250882},{"type":"v1","addr":"192.168.123.109:6811","nonce":1579250882}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6814","nonce":1579250882},{"type":"v1","addr":"192.168.123.109:6815","nonce":1579250882}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6812","nonce":1579250882},{"type":"v1","addr":"192.168.123.109:6813","nonce":1579250882}]},"public_addr":"192.168.123.109:6809/1579250882","cluster_addr":"192.168.123.109:6811/1579250882","heartbeat_back_addr":"192.168.123.109:6815/1579250882","heartbeat_front_addr":"192.168.123.109:6813/1579250882","state":["exists","up"]},{"osd":3,"uuid":"dd7c0f1b-de8e-46ef-adf1-1743157ee826","up":1,"in":1,"weight":1,"primary_affinity":1,"last_c
lean_begin":0,"last_clean_end":0,"up_from":18,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6810","nonce":2627160607},{"type":"v1","addr":"192.168.123.106:6811","nonce":2627160607}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6812","nonce":2627160607},{"type":"v1","addr":"192.168.123.106:6813","nonce":2627160607}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6816","nonce":2627160607},{"type":"v1","addr":"192.168.123.106:6817","nonce":2627160607}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6814","nonce":2627160607},{"type":"v1","addr":"192.168.123.106:6815","nonce":2627160607}]},"public_addr":"192.168.123.106:6811/2627160607","cluster_addr":"192.168.123.106:6813/2627160607","heartbeat_back_addr":"192.168.123.106:6817/2627160607","heartbeat_front_addr":"192.168.123.106:6815/2627160607","state":["exists","up"]},{"osd":4,"uuid":"9acb1f73-ff77-43b9-839d-40df5a4f00f9","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":19,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6816","nonce":489839388},{"type":"v1","addr":"192.168.123.109:6817","nonce":489839388}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6818","nonce":489839388},{"type":"v1","addr":"192.168.123.109:6819","nonce":489839388}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6822","nonce":489839388},{"type":"v1","addr":"192.168.123.109:6823","nonce":489839388}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6820","nonce":489839388},{"type":"v1","addr":"192.168.123.109:6821","nonce":489839388}]},"public_addr":"192.168.123.109:6817/489839388","cluster_addr":"192.168.123.109:6819/489839388","heartbeat_back_addr":"192.168.123.109:6823/489839388","heartbeat_front_addr":"192.168.123.109:6821/489839388","state":["exists","up"]},{"osd":5,"uuid":"0c8f1316-78b2-4ecc-a5cc-29a4a232978f","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":20,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6818","nonce":3054582361},{"type":"v1","addr":"192.168.123.106:6819","nonce":3054582361}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6820","nonce":3054582361},{"type":"v1","addr":"192.168.123.106:6821","nonce":3054582361}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6824","nonce":3054582361},{"type":"v1","addr":"192.168.123.106:6825","nonce":3054582361}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6822","nonce":3054582361},{"type":"v1","addr":"192.168.123.106:6823","nonce":3054582361}]},"public_addr":"192.168.123.106:6819/3054582361","cluster_addr":"192.168.123.106:6821/3054582361","heartbeat_back_addr":"192.168.123.106:6825/3054582361","heartbeat_front_addr":"192.168.123.106:6823/3054582361","state":["exists","up"]},{"osd":6,"uuid":"1eeffe1e-e81f-4304-8758-28d246c7bbc1","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":0,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6824","nonce":2064572425},{"type":"v1","addr":"192.168.123.109:6825","nonce":2064572425}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6826","nonce":2064572425},{"type":"v1","addr":"192.168.123.109:6827","nonce"
:2064572425}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6830","nonce":2064572425},{"type":"v1","addr":"192.168.123.109:6831","nonce":2064572425}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.109:6828","nonce":2064572425},{"type":"v1","addr":"192.168.123.109:6829","nonce":2064572425}]},"public_addr":"192.168.123.109:6825/2064572425","cluster_addr":"192.168.123.109:6827/2064572425","heartbeat_back_addr":"192.168.123.109:6831/2064572425","heartbeat_front_addr":"192.168.123.109:6829/2064572425","state":["exists","up"]},{"osd":7,"uuid":"2eefc3e2-2c3a-4ac3-846b-ab224d110bd8","up":1,"in":1,"weight":1,"primary_affinity":1,"last_clean_begin":0,"last_clean_end":0,"up_from":21,"up_thru":22,"down_at":0,"lost_at":0,"public_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6826","nonce":1155449015},{"type":"v1","addr":"192.168.123.106:6827","nonce":1155449015}]},"cluster_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6828","nonce":1155449015},{"type":"v1","addr":"192.168.123.106:6829","nonce":1155449015}]},"heartbeat_back_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6832","nonce":1155449015},{"type":"v1","addr":"192.168.123.106:6833","nonce":1155449015}]},"heartbeat_front_addrs":{"addrvec":[{"type":"v2","addr":"192.168.123.106:6830","nonce":1155449015},{"type":"v1","addr":"192.168.123.106:6831","nonce":1155449015}]},"public_addr":"192.168.123.106:6827/1155449015","cluster_addr":"192.168.123.106:6829/1155449015","heartbeat_back_addr":"192.168.123.106:6833/1155449015","heartbeat_front_addr":"192.168.123.106:6831/1155449015","state":["exists","up"]}],"osd_xinfo":[{"osd":0,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:52.338458+0000","dead_epoch":0},{"osd":1,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:52.946112+0000","dead_epoch":0},{"osd":2,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:53.702956+0000","dead_epoch":0},{"osd":3,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:54.530210+0000","dead_epoch":0},{"osd":4,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:55.672155+0000","dead_epoch":0},{"osd":5,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:56.522391+0000","dead_epoch":0},{"osd":6,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:57.133917+0000","dead_epoch":0},{"osd":7,"down_stamp":"0.000000","laggy_probability":0,"laggy_interval":0,"features":4541880224203014143,"old_weight":0,"last_purged_snaps_scrub":"2026-04-15T13:34:57.830615+0000","dead_epoch":0}],"pg_upmap":[],"pg_upmap_items":[],"pg_upmap_primaries":[],"pg_temp":[],"primary_temp":[],"blocklist":{"192.168.123.106:0/1794388721":"2026-04-16T13:34:08.155627+0000","192.168.123.106:6800/3431857196":"2026-04-16T13:33:08.499683+0000","192.168.123.106:6801/235423043":"2026-04-16T13:33:29.098096+0000","192.168.123.106:6801/3431857
196":"2026-04-16T13:33:08.499683+0000","192.168.123.106:0/517616836":"2026-04-16T13:34:08.155627+0000","192.168.123.106:0/1351629426":"2026-04-16T13:33:08.499683+0000","192.168.123.106:0/4178947106":"2026-04-16T13:33:08.499683+0000","192.168.123.106:0/881279385":"2026-04-16T13:33:08.499683+0000","192.168.123.106:0/3417962058":"2026-04-16T13:34:08.155627+0000","192.168.123.106:0/901355294":"2026-04-16T13:33:29.098096+0000","192.168.123.106:6800/235423043":"2026-04-16T13:33:29.098096+0000","192.168.123.106:0/3221750867":"2026-04-16T13:33:29.098096+0000","192.168.123.106:0/2993942436":"2026-04-16T13:33:29.098096+0000","192.168.123.106:6800/1064036141":"2026-04-16T13:34:08.155627+0000","192.168.123.106:6801/1064036141":"2026-04-16T13:34:08.155627+0000"},"range_blocklist":{},"erasure_code_profiles":{"default":{"crush-failure-domain":"osd","k":"2","m":"1","plugin":"isa","technique":"reed_sol_van"}},"removed_snaps_queue":[],"new_removed_snaps":[],"new_purged_snaps":[],"crush_node_flags":{},"device_class_flags":{},"stretch_mode":{"stretch_mode_enabled":false,"stretch_bucket_count":0,"degraded_stretch_mode":0,"recovering_stretch_mode":0,"stretch_mode_bucket":0}} 2026-04-15T13:35:05.482 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:05 vm06 bash[28114]: cephadm 2026-04-15T13:35:03.826983+0000 mgr.vm06.qbbldl (mgr.14229) 88 : cephadm [INF] Detected new or changed devices on vm06 2026-04-15T13:35:05.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:05 vm06 bash[28114]: cephadm 2026-04-15T13:35:03.826983+0000 mgr.vm06.qbbldl (mgr.14229) 88 : cephadm [INF] Detected new or changed devices on vm06 2026-04-15T13:35:05.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:05 vm06 bash[28114]: cluster 2026-04-15T13:35:04.170049+0000 mgr.vm06.qbbldl (mgr.14229) 89 : cluster [DBG] pgmap v40: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:05.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:05 vm06 bash[28114]: cluster 2026-04-15T13:35:04.170049+0000 mgr.vm06.qbbldl (mgr.14229) 89 : cluster [DBG] pgmap v40: 1 pgs: 1 active+clean; 577 KiB data, 214 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:05.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:05 vm06 bash[28114]: audit 2026-04-15T13:35:04.732196+0000 mon.vm06 (mon.0) 651 : audit [DBG] from='client.? 192.168.123.106:0/1986669185' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-15T13:35:05.483 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:05 vm06 bash[28114]: audit 2026-04-15T13:35:04.732196+0000 mon.vm06 (mon.0) 651 : audit [DBG] from='client.? 
2026-04-15T13:35:05.565 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph tell osd.0 flush_pg_stats
2026-04-15T13:35:05.565 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph tell osd.1 flush_pg_stats
2026-04-15T13:35:05.565 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph tell osd.2 flush_pg_stats
2026-04-15T13:35:05.565 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph tell osd.3 flush_pg_stats
2026-04-15T13:35:05.565 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph tell osd.4 flush_pg_stats
2026-04-15T13:35:05.565 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph tell osd.5 flush_pg_stats
2026-04-15T13:35:05.566 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph tell osd.6 flush_pg_stats
2026-04-15T13:35:05.566 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph tell osd.7 flush_pg_stats
2026-04-15T13:35:06.122 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:06.130 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:06.130 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:06.159 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:06.159 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:06.173 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:06.196 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:06.201 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:06.344 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:06 vm06 
bash[28114]: audit 2026-04-15T13:35:05.469331+0000 mon.vm06 (mon.0) 652 : audit [DBG] from='client.? 192.168.123.106:0/3009823170' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-15T13:35:06.344 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:06 vm06 bash[28114]: audit 2026-04-15T13:35:05.469331+0000 mon.vm06 (mon.0) 652 : audit [DBG] from='client.? 192.168.123.106:0/3009823170' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-15T13:35:06.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:06 vm09 bash[34466]: audit 2026-04-15T13:35:05.469331+0000 mon.vm06 (mon.0) 652 : audit [DBG] from='client.? 192.168.123.106:0/3009823170' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-15T13:35:06.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:06 vm09 bash[34466]: audit 2026-04-15T13:35:05.469331+0000 mon.vm06 (mon.0) 652 : audit [DBG] from='client.? 192.168.123.106:0/3009823170' entity='client.admin' cmd={"prefix": "osd dump", "format": "json"} : dispatch 2026-04-15T13:35:06.777 INFO:teuthology.orchestra.run.vm06.stdout:68719476740 2026-04-15T13:35:06.777 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd last-stat-seq osd.0 2026-04-15T13:35:06.894 INFO:teuthology.orchestra.run.vm06.stdout:68719476740 2026-04-15T13:35:06.894 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd last-stat-seq osd.1 2026-04-15T13:35:06.909 INFO:teuthology.orchestra.run.vm06.stdout:90194313220 2026-04-15T13:35:06.910 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd last-stat-seq osd.6 2026-04-15T13:35:06.931 INFO:teuthology.orchestra.run.vm06.stdout:85899345923 2026-04-15T13:35:06.931 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd last-stat-seq osd.5 2026-04-15T13:35:06.974 INFO:teuthology.orchestra.run.vm06.stdout:77309411331 2026-04-15T13:35:06.974 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd last-stat-seq osd.3 2026-04-15T13:35:07.147 INFO:teuthology.orchestra.run.vm06.stdout:81604378627 2026-04-15T13:35:07.147 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd last-stat-seq osd.4 2026-04-15T13:35:07.149 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:07.167 INFO:teuthology.orchestra.run.vm06.stdout:73014444036 2026-04-15T13:35:07.167 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image 
harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd last-stat-seq osd.2
2026-04-15T13:35:07.186 INFO:teuthology.orchestra.run.vm06.stdout:90194313219
2026-04-15T13:35:07.186 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd last-stat-seq osd.7
2026-04-15T13:35:07.232 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:07 vm06 bash[28114]: cluster 2026-04-15T13:35:06.170264+0000 mgr.vm06.qbbldl (mgr.14229) 90 : cluster [DBG] pgmap v41: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:35:07.232 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:07 vm06 bash[28114]: cluster 2026-04-15T13:35:06.170264+0000 mgr.vm06.qbbldl (mgr.14229) 90 : cluster [DBG] pgmap v41: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:35:07.318 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:07.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:07 vm09 bash[34466]: cluster 2026-04-15T13:35:06.170264+0000 mgr.vm06.qbbldl (mgr.14229) 90 : cluster [DBG] pgmap v41: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:35:07.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:07 vm09 bash[34466]: cluster 2026-04-15T13:35:06.170264+0000 mgr.vm06.qbbldl (mgr.14229) 90 : cluster [DBG] pgmap v41: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:35:07.384 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:07.427 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:07.604 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:07.611 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:07.711 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:07.723 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:07.900 INFO:teuthology.orchestra.run.vm06.stdout:68719476739
2026-04-15T13:35:08.004 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476740 got 68719476739 for osd.0
2026-04-15T13:35:08.076 INFO:teuthology.orchestra.run.vm06.stdout:68719476739
2026-04-15T13:35:08.192 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:08 vm06 bash[28114]: audit 2026-04-15T13:35:07.886486+0000 mon.vm06 (mon.0) 653 : audit [DBG] from='client.? 192.168.123.106:0/2316958366' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 0} : dispatch
2026-04-15T13:35:08.192 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:08 vm06 bash[28114]: audit 2026-04-15T13:35:07.886486+0000 mon.vm06 (mon.0) 653 : audit [DBG] from='client.? 192.168.123.106:0/2316958366' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 0} : dispatch
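The `need seq 68719476740 got 68719476739 for osd.0` lines document the stat-flush handshake: each `ceph tell osd.N flush_pg_stats` above printed the sequence number of the flushed stats on stdout (e.g. 68719476740), and tasks.cephadm.ceph_manager then polls `ceph osd last-stat-seq osd.N` until the monitor has seen at least that sequence. A minimal sketch of that wait loop, reusing the hypothetical `ceph()` helper from the earlier example (the function shape and timings are assumptions, not the ceph_manager implementation):

```python
import time

def flush_pg_stats(osd_ids, timeout=120):
    # `ceph tell osd.N flush_pg_stats` prints the sequence number it flushed up to
    need = {osd: int(ceph("tell", f"osd.{osd}", "flush_pg_stats")) for osd in osd_ids}
    deadline = time.monotonic() + timeout
    for osd, seq in need.items():
        # Poll until the mon reports the flushed seq; while it lags, this is
        # exactly the "need seq ... got ..." situation logged above.
        while int(ceph("osd", "last-stat-seq", f"osd.{osd}")) < seq:
            if time.monotonic() > deadline:
                raise TimeoutError(f"osd.{osd} never reached stat seq {seq}")
            time.sleep(1)

flush_pg_stats(range(8))  # this run has osd.0 through osd.7
```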
2026-04-15T13:35:08.192 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:08 vm06 bash[28114]: audit 2026-04-15T13:35:08.068659+0000 mon.vm06 (mon.0) 654 : audit [DBG] from='client.? 192.168.123.106:0/2190475189' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 1} : dispatch
2026-04-15T13:35:08.192 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:08 vm06 bash[28114]: audit 2026-04-15T13:35:08.068659+0000 mon.vm06 (mon.0) 654 : audit [DBG] from='client.? 192.168.123.106:0/2190475189' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 1} : dispatch
2026-04-15T13:35:08.262 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476740 got 68719476739 for osd.1
2026-04-15T13:35:08.328 INFO:teuthology.orchestra.run.vm06.stdout:90194313219
2026-04-15T13:35:08.345 INFO:teuthology.orchestra.run.vm06.stdout:85899345922
2026-04-15T13:35:08.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:08 vm09 bash[34466]: audit 2026-04-15T13:35:07.886486+0000 mon.vm06 (mon.0) 653 : audit [DBG] from='client.? 192.168.123.106:0/2316958366' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 0} : dispatch
2026-04-15T13:35:08.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:08 vm09 bash[34466]: audit 2026-04-15T13:35:07.886486+0000 mon.vm06 (mon.0) 653 : audit [DBG] from='client.? 192.168.123.106:0/2316958366' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 0} : dispatch
2026-04-15T13:35:08.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:08 vm09 bash[34466]: audit 2026-04-15T13:35:08.068659+0000 mon.vm06 (mon.0) 654 : audit [DBG] from='client.? 192.168.123.106:0/2190475189' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 1} : dispatch
2026-04-15T13:35:08.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:08 vm09 bash[34466]: audit 2026-04-15T13:35:08.068659+0000 mon.vm06 (mon.0) 654 : audit [DBG] from='client.? 
192.168.123.106:0/2190475189' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 1} : dispatch 2026-04-15T13:35:08.432 INFO:teuthology.orchestra.run.vm06.stdout:81604378626 2026-04-15T13:35:08.445 INFO:teuthology.orchestra.run.vm06.stdout:73014444035 2026-04-15T13:35:08.485 INFO:tasks.cephadm.ceph_manager.ceph:need seq 85899345923 got 85899345922 for osd.5 2026-04-15T13:35:08.496 INFO:tasks.cephadm.ceph_manager.ceph:need seq 90194313220 got 90194313219 for osd.6 2026-04-15T13:35:08.501 INFO:teuthology.orchestra.run.vm06.stdout:77309411330 2026-04-15T13:35:08.544 INFO:tasks.cephadm.ceph_manager.ceph:need seq 81604378627 got 81604378626 for osd.4 2026-04-15T13:35:08.548 INFO:teuthology.orchestra.run.vm06.stdout:90194313218 2026-04-15T13:35:08.631 INFO:tasks.cephadm.ceph_manager.ceph:need seq 77309411331 got 77309411330 for osd.3 2026-04-15T13:35:08.649 INFO:tasks.cephadm.ceph_manager.ceph:need seq 73014444036 got 73014444035 for osd.2 2026-04-15T13:35:08.682 INFO:tasks.cephadm.ceph_manager.ceph:need seq 90194313219 got 90194313218 for osd.7 2026-04-15T13:35:09.005 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd last-stat-seq osd.0 2026-04-15T13:35:09.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:09 vm06 bash[28114]: cluster 2026-04-15T13:35:08.170513+0000 mgr.vm06.qbbldl (mgr.14229) 91 : cluster [DBG] pgmap v42: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:09.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:09 vm06 bash[28114]: cluster 2026-04-15T13:35:08.170513+0000 mgr.vm06.qbbldl (mgr.14229) 91 : cluster [DBG] pgmap v42: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:09.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:09 vm06 bash[28114]: audit 2026-04-15T13:35:08.329047+0000 mon.vm06 (mon.0) 655 : audit [DBG] from='client.? 192.168.123.106:0/2819122301' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 6} : dispatch 2026-04-15T13:35:09.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:09 vm06 bash[28114]: audit 2026-04-15T13:35:08.329047+0000 mon.vm06 (mon.0) 655 : audit [DBG] from='client.? 192.168.123.106:0/2819122301' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 6} : dispatch 2026-04-15T13:35:09.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:09 vm06 bash[28114]: audit 2026-04-15T13:35:08.346252+0000 mon.vm06 (mon.0) 656 : audit [DBG] from='client.? 192.168.123.106:0/599165349' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 5} : dispatch 2026-04-15T13:35:09.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:09 vm06 bash[28114]: audit 2026-04-15T13:35:08.346252+0000 mon.vm06 (mon.0) 656 : audit [DBG] from='client.? 192.168.123.106:0/599165349' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 5} : dispatch 2026-04-15T13:35:09.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:09 vm06 bash[28114]: audit 2026-04-15T13:35:08.429771+0000 mon.vm06 (mon.0) 657 : audit [DBG] from='client.? 192.168.123.106:0/2215773918' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 4} : dispatch 2026-04-15T13:35:09.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:09 vm06 bash[28114]: audit 2026-04-15T13:35:08.429771+0000 mon.vm06 (mon.0) 657 : audit [DBG] from='client.? 
192.168.123.106:0/2215773918' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 4} : dispatch 2026-04-15T13:35:09.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:09 vm06 bash[28114]: audit 2026-04-15T13:35:08.442309+0000 mon.vm06 (mon.0) 658 : audit [DBG] from='client.? 192.168.123.106:0/3551549418' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-15T13:35:09.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:09 vm06 bash[28114]: audit 2026-04-15T13:35:08.442309+0000 mon.vm06 (mon.0) 658 : audit [DBG] from='client.? 192.168.123.106:0/3551549418' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-15T13:35:09.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:09 vm06 bash[28114]: audit 2026-04-15T13:35:08.481126+0000 mon.vm06 (mon.0) 659 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:35:09.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:09 vm06 bash[28114]: audit 2026-04-15T13:35:08.481126+0000 mon.vm06 (mon.0) 659 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:35:09.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:09 vm06 bash[28114]: audit 2026-04-15T13:35:08.495756+0000 mon.vm09 (mon.1) 27 : audit [DBG] from='client.? 192.168.123.106:0/2224102874' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 3} : dispatch 2026-04-15T13:35:09.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:09 vm06 bash[28114]: audit 2026-04-15T13:35:08.495756+0000 mon.vm09 (mon.1) 27 : audit [DBG] from='client.? 192.168.123.106:0/2224102874' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 3} : dispatch 2026-04-15T13:35:09.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:09 vm06 bash[28114]: audit 2026-04-15T13:35:08.547821+0000 mon.vm06 (mon.0) 660 : audit [DBG] from='client.? 192.168.123.106:0/1282337383' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 7} : dispatch 2026-04-15T13:35:09.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:09 vm06 bash[28114]: audit 2026-04-15T13:35:08.547821+0000 mon.vm06 (mon.0) 660 : audit [DBG] from='client.? 
192.168.123.106:0/1282337383' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 7} : dispatch 2026-04-15T13:35:09.262 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd last-stat-seq osd.1 2026-04-15T13:35:09.278 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:09 vm09 bash[34466]: cluster 2026-04-15T13:35:08.170513+0000 mgr.vm06.qbbldl (mgr.14229) 91 : cluster [DBG] pgmap v42: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:09 vm09 bash[34466]: cluster 2026-04-15T13:35:08.170513+0000 mgr.vm06.qbbldl (mgr.14229) 91 : cluster [DBG] pgmap v42: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:09 vm09 bash[34466]: audit 2026-04-15T13:35:08.329047+0000 mon.vm06 (mon.0) 655 : audit [DBG] from='client.? 192.168.123.106:0/2819122301' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 6} : dispatch 2026-04-15T13:35:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:09 vm09 bash[34466]: audit 2026-04-15T13:35:08.329047+0000 mon.vm06 (mon.0) 655 : audit [DBG] from='client.? 192.168.123.106:0/2819122301' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 6} : dispatch 2026-04-15T13:35:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:09 vm09 bash[34466]: audit 2026-04-15T13:35:08.346252+0000 mon.vm06 (mon.0) 656 : audit [DBG] from='client.? 192.168.123.106:0/599165349' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 5} : dispatch 2026-04-15T13:35:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:09 vm09 bash[34466]: audit 2026-04-15T13:35:08.346252+0000 mon.vm06 (mon.0) 656 : audit [DBG] from='client.? 192.168.123.106:0/599165349' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 5} : dispatch 2026-04-15T13:35:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:09 vm09 bash[34466]: audit 2026-04-15T13:35:08.429771+0000 mon.vm06 (mon.0) 657 : audit [DBG] from='client.? 192.168.123.106:0/2215773918' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 4} : dispatch 2026-04-15T13:35:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:09 vm09 bash[34466]: audit 2026-04-15T13:35:08.429771+0000 mon.vm06 (mon.0) 657 : audit [DBG] from='client.? 192.168.123.106:0/2215773918' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 4} : dispatch 2026-04-15T13:35:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:09 vm09 bash[34466]: audit 2026-04-15T13:35:08.442309+0000 mon.vm06 (mon.0) 658 : audit [DBG] from='client.? 192.168.123.106:0/3551549418' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-15T13:35:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:09 vm09 bash[34466]: audit 2026-04-15T13:35:08.442309+0000 mon.vm06 (mon.0) 658 : audit [DBG] from='client.? 
192.168.123.106:0/3551549418' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-15T13:35:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:09 vm09 bash[34466]: audit 2026-04-15T13:35:08.481126+0000 mon.vm06 (mon.0) 659 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:35:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:09 vm09 bash[34466]: audit 2026-04-15T13:35:08.481126+0000 mon.vm06 (mon.0) 659 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:35:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:09 vm09 bash[34466]: audit 2026-04-15T13:35:08.495756+0000 mon.vm09 (mon.1) 27 : audit [DBG] from='client.? 192.168.123.106:0/2224102874' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 3} : dispatch 2026-04-15T13:35:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:09 vm09 bash[34466]: audit 2026-04-15T13:35:08.495756+0000 mon.vm09 (mon.1) 27 : audit [DBG] from='client.? 192.168.123.106:0/2224102874' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 3} : dispatch 2026-04-15T13:35:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:09 vm09 bash[34466]: audit 2026-04-15T13:35:08.547821+0000 mon.vm06 (mon.0) 660 : audit [DBG] from='client.? 192.168.123.106:0/1282337383' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 7} : dispatch 2026-04-15T13:35:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:09 vm09 bash[34466]: audit 2026-04-15T13:35:08.547821+0000 mon.vm06 (mon.0) 660 : audit [DBG] from='client.? 
192.168.123.106:0/1282337383' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 7} : dispatch 2026-04-15T13:35:09.486 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd last-stat-seq osd.5 2026-04-15T13:35:09.496 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd last-stat-seq osd.6 2026-04-15T13:35:09.543 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:09.545 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd last-stat-seq osd.4 2026-04-15T13:35:09.632 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd last-stat-seq osd.3 2026-04-15T13:35:09.650 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd last-stat-seq osd.2 2026-04-15T13:35:09.683 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph osd last-stat-seq osd.7 2026-04-15T13:35:09.705 INFO:teuthology.orchestra.run.vm06.stdout:68719476740 2026-04-15T13:35:09.802 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:09.934 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:09.936 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:10.007 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476740 got 68719476740 for osd.0 2026-04-15T13:35:10.008 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:10.008 DEBUG:teuthology.parallel:result is None 2026-04-15T13:35:10.187 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:10.199 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:10.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:10 vm06 bash[28114]: audit 2026-04-15T13:35:09.701256+0000 mon.vm06 (mon.0) 661 : audit [DBG] from='client.? 192.168.123.106:0/3473398421' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 0} : dispatch 2026-04-15T13:35:10.252 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:10 vm06 bash[28114]: audit 2026-04-15T13:35:09.701256+0000 mon.vm06 (mon.0) 661 : audit [DBG] from='client.? 
192.168.123.106:0/3473398421' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 0} : dispatch
2026-04-15T13:35:10.310 INFO:teuthology.orchestra.run.vm06.stdout:68719476740
2026-04-15T13:35:10.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:10 vm09 bash[34466]: audit 2026-04-15T13:35:09.701256+0000 mon.vm06 (mon.0) 661 : audit [DBG] from='client.? 192.168.123.106:0/3473398421' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 0} : dispatch
2026-04-15T13:35:10.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:10 vm09 bash[34466]: audit 2026-04-15T13:35:09.701256+0000 mon.vm06 (mon.0) 661 : audit [DBG] from='client.? 192.168.123.106:0/3473398421' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 0} : dispatch
2026-04-15T13:35:10.486 INFO:tasks.cephadm.ceph_manager.ceph:need seq 68719476740 got 68719476740 for osd.1
2026-04-15T13:35:10.486 DEBUG:teuthology.parallel:result is None
2026-04-15T13:35:10.607 INFO:teuthology.orchestra.run.vm06.stdout:85899345924
2026-04-15T13:35:10.644 INFO:teuthology.orchestra.run.vm06.stdout:81604378628
2026-04-15T13:35:10.734 INFO:teuthology.orchestra.run.vm06.stdout:90194313220
2026-04-15T13:35:10.766 INFO:teuthology.orchestra.run.vm06.stdout:73014444036
2026-04-15T13:35:10.805 INFO:tasks.cephadm.ceph_manager.ceph:need seq 85899345923 got 85899345924 for osd.5
2026-04-15T13:35:10.805 DEBUG:teuthology.parallel:result is None
2026-04-15T13:35:10.805 INFO:tasks.cephadm.ceph_manager.ceph:need seq 81604378627 got 81604378628 for osd.4
2026-04-15T13:35:10.805 DEBUG:teuthology.parallel:result is None
2026-04-15T13:35:10.811 INFO:teuthology.orchestra.run.vm06.stdout:90194313219
2026-04-15T13:35:10.862 INFO:tasks.cephadm.ceph_manager.ceph:need seq 90194313220 got 90194313220 for osd.6
2026-04-15T13:35:10.862 DEBUG:teuthology.parallel:result is None
2026-04-15T13:35:10.868 INFO:teuthology.orchestra.run.vm06.stdout:77309411332
2026-04-15T13:35:10.952 INFO:tasks.cephadm.ceph_manager.ceph:need seq 77309411331 got 77309411332 for osd.3
2026-04-15T13:35:10.952 DEBUG:teuthology.parallel:result is None
2026-04-15T13:35:10.977 INFO:tasks.cephadm.ceph_manager.ceph:need seq 90194313219 got 90194313219 for osd.7
2026-04-15T13:35:10.977 DEBUG:teuthology.parallel:result is None
2026-04-15T13:35:10.981 INFO:tasks.cephadm.ceph_manager.ceph:need seq 73014444036 got 73014444036 for osd.2
2026-04-15T13:35:10.982 DEBUG:teuthology.parallel:result is None
2026-04-15T13:35:10.982 INFO:tasks.cephadm.ceph_manager.ceph:waiting for clean
2026-04-15T13:35:10.982 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph pg dump --format=json
2026-04-15T13:35:11.103 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:11 vm06 bash[28114]: cluster 2026-04-15T13:35:10.170753+0000 mgr.vm06.qbbldl (mgr.14229) 92 : cluster [DBG] pgmap v43: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:35:11.103 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:11 vm06 bash[28114]: cluster 2026-04-15T13:35:10.170753+0000 mgr.vm06.qbbldl (mgr.14229) 92 : cluster [DBG] pgmap v43: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:35:11.103 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:11 vm06 bash[28114]: audit 2026-04-15T13:35:10.300649+0000 mon.vm06 (mon.0) 662 : audit [DBG] from='client.? 192.168.123.106:0/1039819213' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 1} : dispatch
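`waiting for clean` starts the final health gate of this step: the harness runs `ceph pg dump --format=json` (the two dumps that follow) until every PG in `pg_map.pg_stats` reports `active+clean`; in this run there is a single PG, 1.0. A simplified version of that poll with the same hypothetical `ceph()` helper (teuthology's real wait-for-clean does more bookkeeping; this only captures the state check):

```python
import json
import time

def wait_for_clean(timeout=300):
    deadline = time.monotonic() + timeout
    while True:
        # Same top-level layout as the dumps below: {"pg_ready": ..., "pg_map": {...}}
        pg_map = json.loads(ceph("pg", "dump", "--format=json"))["pg_map"]
        states = {pg["pgid"]: pg["state"] for pg in pg_map["pg_stats"]}
        if states and all(s == "active+clean" for s in states.values()):
            return states  # e.g. {"1.0": "active+clean"} in this run
        if time.monotonic() > deadline:
            raise TimeoutError(f"PGs never went clean: {states}")
        time.sleep(2)
```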
2026-04-15T13:35:11.103 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:11 vm06 bash[28114]: audit 2026-04-15T13:35:10.300649+0000 mon.vm06 (mon.0) 662 : audit [DBG] from='client.? 192.168.123.106:0/1039819213' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 1} : dispatch
2026-04-15T13:35:11.103 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:11 vm06 bash[28114]: audit 2026-04-15T13:35:10.597324+0000 mon.vm06 (mon.0) 663 : audit [DBG] from='client.? 192.168.123.106:0/3700756978' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 5} : dispatch
2026-04-15T13:35:11.103 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:11 vm06 bash[28114]: audit 2026-04-15T13:35:10.597324+0000 mon.vm06 (mon.0) 663 : audit [DBG] from='client.? 192.168.123.106:0/3700756978' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 5} : dispatch
2026-04-15T13:35:11.103 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:11 vm06 bash[28114]: audit 2026-04-15T13:35:10.641793+0000 mon.vm09 (mon.1) 28 : audit [DBG] from='client.? 192.168.123.106:0/2213565403' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 4} : dispatch
2026-04-15T13:35:11.103 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:11 vm06 bash[28114]: audit 2026-04-15T13:35:10.641793+0000 mon.vm09 (mon.1) 28 : audit [DBG] from='client.? 192.168.123.106:0/2213565403' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 4} : dispatch
2026-04-15T13:35:11.103 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:11 vm06 bash[28114]: audit 2026-04-15T13:35:10.734641+0000 mon.vm06 (mon.0) 664 : audit [DBG] from='client.? 192.168.123.106:0/3028070309' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 6} : dispatch
2026-04-15T13:35:11.103 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:11 vm06 bash[28114]: audit 2026-04-15T13:35:10.734641+0000 mon.vm06 (mon.0) 664 : audit [DBG] from='client.? 192.168.123.106:0/3028070309' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 6} : dispatch
2026-04-15T13:35:11.103 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:11 vm06 bash[28114]: audit 2026-04-15T13:35:10.766269+0000 mon.vm06 (mon.0) 665 : audit [DBG] from='client.? 192.168.123.106:0/236836123' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch
2026-04-15T13:35:11.103 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:11 vm06 bash[28114]: audit 2026-04-15T13:35:10.766269+0000 mon.vm06 (mon.0) 665 : audit [DBG] from='client.? 192.168.123.106:0/236836123' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch
2026-04-15T13:35:11.103 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:11 vm06 bash[28114]: audit 2026-04-15T13:35:10.812834+0000 mon.vm06 (mon.0) 666 : audit [DBG] from='client.? 192.168.123.106:0/1893400504' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 7} : dispatch
2026-04-15T13:35:11.103 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:11 vm06 bash[28114]: audit 2026-04-15T13:35:10.812834+0000 mon.vm06 (mon.0) 666 : audit [DBG] from='client.? 192.168.123.106:0/1893400504' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 7} : dispatch
2026-04-15T13:35:11.103 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:11 vm06 bash[28114]: audit 2026-04-15T13:35:10.869230+0000 mon.vm06 (mon.0) 667 : audit [DBG] from='client.? 
192.168.123.106:0/985278545' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 3} : dispatch 2026-04-15T13:35:11.103 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:11 vm06 bash[28114]: audit 2026-04-15T13:35:10.869230+0000 mon.vm06 (mon.0) 667 : audit [DBG] from='client.? 192.168.123.106:0/985278545' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 3} : dispatch 2026-04-15T13:35:11.279 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:11.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:11 vm09 bash[34466]: cluster 2026-04-15T13:35:10.170753+0000 mgr.vm06.qbbldl (mgr.14229) 92 : cluster [DBG] pgmap v43: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:11.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:11 vm09 bash[34466]: cluster 2026-04-15T13:35:10.170753+0000 mgr.vm06.qbbldl (mgr.14229) 92 : cluster [DBG] pgmap v43: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:11.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:11 vm09 bash[34466]: audit 2026-04-15T13:35:10.300649+0000 mon.vm06 (mon.0) 662 : audit [DBG] from='client.? 192.168.123.106:0/1039819213' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 1} : dispatch 2026-04-15T13:35:11.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:11 vm09 bash[34466]: audit 2026-04-15T13:35:10.300649+0000 mon.vm06 (mon.0) 662 : audit [DBG] from='client.? 192.168.123.106:0/1039819213' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 1} : dispatch 2026-04-15T13:35:11.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:11 vm09 bash[34466]: audit 2026-04-15T13:35:10.597324+0000 mon.vm06 (mon.0) 663 : audit [DBG] from='client.? 192.168.123.106:0/3700756978' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 5} : dispatch 2026-04-15T13:35:11.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:11 vm09 bash[34466]: audit 2026-04-15T13:35:10.597324+0000 mon.vm06 (mon.0) 663 : audit [DBG] from='client.? 192.168.123.106:0/3700756978' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 5} : dispatch 2026-04-15T13:35:11.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:11 vm09 bash[34466]: audit 2026-04-15T13:35:10.641793+0000 mon.vm09 (mon.1) 28 : audit [DBG] from='client.? 192.168.123.106:0/2213565403' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 4} : dispatch 2026-04-15T13:35:11.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:11 vm09 bash[34466]: audit 2026-04-15T13:35:10.641793+0000 mon.vm09 (mon.1) 28 : audit [DBG] from='client.? 192.168.123.106:0/2213565403' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 4} : dispatch 2026-04-15T13:35:11.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:11 vm09 bash[34466]: audit 2026-04-15T13:35:10.734641+0000 mon.vm06 (mon.0) 664 : audit [DBG] from='client.? 192.168.123.106:0/3028070309' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 6} : dispatch 2026-04-15T13:35:11.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:11 vm09 bash[34466]: audit 2026-04-15T13:35:10.734641+0000 mon.vm06 (mon.0) 664 : audit [DBG] from='client.? 
192.168.123.106:0/3028070309' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 6} : dispatch 2026-04-15T13:35:11.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:11 vm09 bash[34466]: audit 2026-04-15T13:35:10.766269+0000 mon.vm06 (mon.0) 665 : audit [DBG] from='client.? 192.168.123.106:0/236836123' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-15T13:35:11.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:11 vm09 bash[34466]: audit 2026-04-15T13:35:10.766269+0000 mon.vm06 (mon.0) 665 : audit [DBG] from='client.? 192.168.123.106:0/236836123' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 2} : dispatch 2026-04-15T13:35:11.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:11 vm09 bash[34466]: audit 2026-04-15T13:35:10.812834+0000 mon.vm06 (mon.0) 666 : audit [DBG] from='client.? 192.168.123.106:0/1893400504' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 7} : dispatch 2026-04-15T13:35:11.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:11 vm09 bash[34466]: audit 2026-04-15T13:35:10.812834+0000 mon.vm06 (mon.0) 666 : audit [DBG] from='client.? 192.168.123.106:0/1893400504' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 7} : dispatch 2026-04-15T13:35:11.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:11 vm09 bash[34466]: audit 2026-04-15T13:35:10.869230+0000 mon.vm06 (mon.0) 667 : audit [DBG] from='client.? 192.168.123.106:0/985278545' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 3} : dispatch 2026-04-15T13:35:11.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:11 vm09 bash[34466]: audit 2026-04-15T13:35:10.869230+0000 mon.vm06 (mon.0) 667 : audit [DBG] from='client.? 192.168.123.106:0/985278545' entity='client.admin' cmd={"prefix": "osd last-stat-seq", "id": 3} : dispatch 2026-04-15T13:35:11.645 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:35:11.645 INFO:teuthology.orchestra.run.vm06.stderr:dumped all 2026-04-15T13:35:11.712 
INFO:teuthology.orchestra.run.vm06.stdout:{"pg_ready":true,"pg_map":{"version":43,"stamp":"2026-04-15T13:35:10.170624+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":126,"num_read_kb":109,"num_write":233,"num_write_kb":4760,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":192,"ondisk_log_size":192,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":8,"num_per_pool_omap_osds":3,"kb":167706624,"kb_used":219716,"kb_used_data":3532,"kb_used_omap":57,"kb_used_meta":215494,"kb_avail":167486908,"statfs":{"total":171731582976,"available":171506593792,"internally_reserved":0,"allocated":3616768,"data_stored":2451248,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":59089,"internal_metadata":220666159},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_delta":"6.000669"},"pg_stats":[{"pgid":"1.0","version":"23'192","reported_seq":242,"reported_epoch":24,"state":"active+clean","last_fresh":"2026-04-15T13:35:02.070337+0000","last_change":"2026-04-15T13:35:01.121238+0000","last_active":
"2026-04-15T13:35:02.070337+0000","last_peered":"2026-04-15T13:35:02.070337+0000","last_clean":"2026-04-15T13:35:02.070337+0000","last_became_active":"2026-04-15T13:35:01.120983+0000","last_became_peered":"2026-04-15T13:35:01.120983+0000","last_unstale":"2026-04-15T13:35:02.070337+0000","last_undegraded":"2026-04-15T13:35:02.070337+0000","last_fullsized":"2026-04-15T13:35:02.070337+0000","mapping_epoch":22,"log_start":"0'0","ondisk_log_start":"0'0","created":19,"last_epoch_clean":23,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-04-15T13:34:57.012670+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-04-15T13:34:57.012670+0000","last_clean_scrub_stamp":"2026-04-15T13:34:57.012670+0000","objects_scrubbed":0,"log_size":192,"log_dups_size":0,"ondisk_log_size":192,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-04-16T20:49:03.241352+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":126,"num_read_kb":109,"num_write":233,"num_write_kb":4760,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,3],"acting":[7,0,3],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":126,"num_read_kb":109,"num_write":233,"num_write_kb":4760,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1781760,"data_stored":1771104,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":192,"ondisk_log_size":192,"up":3,"acting":3,"num_store_stats":4}],"osd_stats":[{"osd":7,"up_from":21,"seq":90194313220,"num_pgs":1,"num_
osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27952,"kb_used_data":804,"kb_used_omap":6,"kb_used_meta":26937,"kb_avail":20935376,"statfs":{"total":21466447872,"available":21437825024,"internally_reserved":0,"allocated":823296,"data_stored":675386,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":6159,"internal_metadata":27584497},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":6,"up_from":21,"seq":90194313221,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27184,"kb_used_data":224,"kb_used_omap":6,"kb_used_meta":26937,"kb_avail":20936144,"statfs":{"total":21466447872,"available":21438611456,"internally_reserved":0,"allocated":229376,"data_stored":85018,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":6163,"internal_metadata":27584493},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":5,"up_from":20,"seq":85899345924,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27184,"kb_used_data":224,"kb_used_omap":6,"kb_used_meta":26937,"kb_avail":20936144,"statfs":{"total":21466447872,"available":21438611456,"internally_reserved":0,"allocated":229376,"data_stored":85018,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":6164,"internal_metadata":27584492},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":4,"up_from":19,"seq":81604378628,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27184,"kb_used_data":224,"kb_used_omap":7,"kb_used_meta":26936,"kb_avail":20936144,"statfs":{"total":21466447872,"available":21438611456,"internally_reserved":0,"allocated":229376,"data_stored":85018,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":7471,"internal_metadata":27583185},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":3,"up_from":18,"seq":77309411332,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27952,"kb_used_data":804,"kb_used_omap":7,"kb_used_meta":26936,"kb_avail":20935376,"statfs":{"total":21466447872,"available":21437825024,"internally_reserved":0,"allocated":823296,"data_stored":675386,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":8119,"internal_metadata":27582537},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit
_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":2,"up_from":17,"seq":73014444037,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27184,"kb_used_data":224,"kb_used_omap":7,"kb_used_meta":26936,"kb_avail":20936144,"statfs":{"total":21466447872,"available":21438611456,"internally_reserved":0,"allocated":229376,"data_stored":85018,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":7471,"internal_metadata":27583185},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":1,"up_from":16,"seq":68719476741,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27184,"kb_used_data":224,"kb_used_omap":8,"kb_used_meta":26935,"kb_avail":20936144,"statfs":{"total":21466447872,"available":21438611456,"internally_reserved":0,"allocated":229376,"data_stored":85018,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":8771,"internal_metadata":27581885},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":16,"seq":68719476741,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27892,"kb_used_data":804,"kb_used_omap":8,"kb_used_meta":26935,"kb_avail":20935436,"statfs":{"total":21466447872,"available":21437886464,"internally_reserved":0,"allocated":823296,"data_stored":675386,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":8771,"internal_metadata":27581885},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":1,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":3,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-04-15T13:35:11.712 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph pg dump --format=json 2026-04-15T13:35:11.979 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config 
/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:12.357 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:35:12.357 INFO:teuthology.orchestra.run.vm06.stderr:dumped all 2026-04-15T13:35:12.430 INFO:teuthology.orchestra.run.vm06.stdout:{"pg_ready":true,"pg_map":{"version":44,"stamp":"2026-04-15T13:35:12.170855+0000","last_osdmap_epoch":0,"last_pg_scan":0,"pg_stats_sum":{"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":126,"num_read_kb":109,"num_write":233,"num_write_kb":4760,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":192,"ondisk_log_size":192,"up":3,"acting":3,"num_store_stats":0},"osd_stats_sum":{"up_from":0,"seq":0,"num_pgs":3,"num_osds":8,"num_per_pool_osds":8,"num_per_pool_omap_osds":3,"kb":167706624,"kb_used":219716,"kb_used_data":3532,"kb_used_omap":57,"kb_used_meta":215494,"kb_avail":167486908,"statfs":{"total":171731582976,"available":171506593792,"internally_reserved":0,"allocated":3616768,"data_stored":2451248,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":59089,"internal_metadata":220666159},"hb_peers":[],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[],"network_ping_times":[]},"pg_stats_delta":{"stat_sum":{"num_bytes":0,"num_objects":0,"num_object_clones":0,"num_object_copies":0,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":0,"num_whiteouts":0,"num_read":0,"num_read_kb":0,"num_write":0,"num_write_kb":0,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":0,"ondisk_log_size":0,"up":0,"acting":0,"num_store_stats":0,"stamp_
delta":"8.000900"},"pg_stats":[{"pgid":"1.0","version":"23'192","reported_seq":242,"reported_epoch":24,"state":"active+clean","last_fresh":"2026-04-15T13:35:02.070337+0000","last_change":"2026-04-15T13:35:01.121238+0000","last_active":"2026-04-15T13:35:02.070337+0000","last_peered":"2026-04-15T13:35:02.070337+0000","last_clean":"2026-04-15T13:35:02.070337+0000","last_became_active":"2026-04-15T13:35:01.120983+0000","last_became_peered":"2026-04-15T13:35:01.120983+0000","last_unstale":"2026-04-15T13:35:02.070337+0000","last_undegraded":"2026-04-15T13:35:02.070337+0000","last_fullsized":"2026-04-15T13:35:02.070337+0000","mapping_epoch":22,"log_start":"0'0","ondisk_log_start":"0'0","created":19,"last_epoch_clean":23,"parent":"0.0","parent_split_bits":0,"last_scrub":"0'0","last_scrub_stamp":"2026-04-15T13:34:57.012670+0000","last_deep_scrub":"0'0","last_deep_scrub_stamp":"2026-04-15T13:34:57.012670+0000","last_clean_scrub_stamp":"2026-04-15T13:34:57.012670+0000","objects_scrubbed":0,"log_size":192,"log_dups_size":0,"ondisk_log_size":192,"stats_invalid":false,"dirty_stats_invalid":false,"omap_stats_invalid":false,"hitset_stats_invalid":false,"hitset_bytes_stats_invalid":false,"pin_stats_invalid":false,"manifest_stats_invalid":false,"snaptrimq_len":0,"last_scrub_duration":0,"scrub_schedule":"periodic scrub scheduled @ 2026-04-16T20:49:03.241352+0000","scrub_duration":0,"objects_trimmed":0,"snaptrim_duration":0,"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":126,"num_read_kb":109,"num_write":233,"num_write_kb":4760,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"up":[7,0,3],"acting":[7,0,3],"avail_no_missing":[],"object_location_counts":[],"blocked_by":[],"up_primary":7,"acting_primary":7,"purged_snaps":[]}],"pool_stats":[{"poolid":1,"num_pg":1,"stat_sum":{"num_bytes":590368,"num_objects":2,"num_object_clones":0,"num_object_copies":6,"num_objects_missing_on_primary":0,"num_objects_missing":0,"num_objects_degraded":0,"num_objects_misplaced":0,"num_objects_unfound":0,"num_objects_dirty":2,"num_whiteouts":0,"num_read":126,"num_read_kb":109,"num_write":233,"num_write_kb":4760,"num_scrub_errors":0,"num_shallow_scrub_errors":0,"num_deep_scrub_errors":0,"num_objects_recovered":0,"num_bytes_recovered":0,"num_keys_recovered":0,"num_objects_omap":0,"num_objects_hit_set_archive":0,"num_bytes_hit_set_archive":0,"num_flush":0,"num_flush_kb":0,"num_evict":0,"num_evict_kb":0,"num_promote":0,"num_flush_mode_high":0,"num_flush_mode_low":0,"num_evict_mode_some":0,"num_evict_mode_full":0,"num_objects_pinned":0,"num_legacy_snapsets":0,"num_large_omap_objects":0,"num_objects_manifest":0,"num_omap_bytes":0,"num_omap_keys":0,"num_objects_repaired":0},"store_stats":{"total":0,"available":0,"internally_reserved":0,"allocated":1781760,"data_stored":1771104,"data_compressed":0,"data_compres
sed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},"log_size":192,"ondisk_log_size":192,"up":3,"acting":3,"num_store_stats":4}],"osd_stats":[{"osd":7,"up_from":21,"seq":90194313220,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27952,"kb_used_data":804,"kb_used_omap":6,"kb_used_meta":26937,"kb_avail":20935376,"statfs":{"total":21466447872,"available":21437825024,"internally_reserved":0,"allocated":823296,"data_stored":675386,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":6159,"internal_metadata":27584497},"hb_peers":[0,1,2,3,4,5,6],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":6,"up_from":21,"seq":90194313221,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27184,"kb_used_data":224,"kb_used_omap":6,"kb_used_meta":26937,"kb_avail":20936144,"statfs":{"total":21466447872,"available":21438611456,"internally_reserved":0,"allocated":229376,"data_stored":85018,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":6163,"internal_metadata":27584493},"hb_peers":[0,1,2,3,4,5,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":5,"up_from":20,"seq":85899345924,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27184,"kb_used_data":224,"kb_used_omap":6,"kb_used_meta":26937,"kb_avail":20936144,"statfs":{"total":21466447872,"available":21438611456,"internally_reserved":0,"allocated":229376,"data_stored":85018,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":6164,"internal_metadata":27584492},"hb_peers":[0,1,2,3,4,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":4,"up_from":19,"seq":81604378628,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27184,"kb_used_data":224,"kb_used_omap":7,"kb_used_meta":26936,"kb_avail":20936144,"statfs":{"total":21466447872,"available":21438611456,"internally_reserved":0,"allocated":229376,"data_stored":85018,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":7471,"internal_metadata":27583185},"hb_peers":[0,1,2,3,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":3,"up_from":18,"seq":77309411333,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27952,"kb_used_data":804,"kb_used_omap":7,"kb_used_meta":26936,"kb_avail":20935376,"statfs":{"total":21466447872,"available":21437825024,"internally_reserved":0,"allocated":823296,"data_stored":675386,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":8119,"interna
l_metadata":27582537},"hb_peers":[0,1,2,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":2,"up_from":17,"seq":73014444037,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27184,"kb_used_data":224,"kb_used_omap":7,"kb_used_meta":26936,"kb_avail":20936144,"statfs":{"total":21466447872,"available":21438611456,"internally_reserved":0,"allocated":229376,"data_stored":85018,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":7471,"internal_metadata":27583185},"hb_peers":[0,1,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":1,"up_from":16,"seq":68719476741,"num_pgs":0,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":0,"kb":20963328,"kb_used":27184,"kb_used_data":224,"kb_used_omap":8,"kb_used_meta":26935,"kb_avail":20936144,"statfs":{"total":21466447872,"available":21438611456,"internally_reserved":0,"allocated":229376,"data_stored":85018,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":8771,"internal_metadata":27581885},"hb_peers":[0,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]},{"osd":0,"up_from":16,"seq":68719476741,"num_pgs":1,"num_osds":1,"num_per_pool_osds":1,"num_per_pool_omap_osds":1,"kb":20963328,"kb_used":27892,"kb_used_data":804,"kb_used_omap":8,"kb_used_meta":26935,"kb_avail":20935436,"statfs":{"total":21466447872,"available":21437886464,"internally_reserved":0,"allocated":823296,"data_stored":675386,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":8771,"internal_metadata":27581885},"hb_peers":[1,2,3,4,5,6,7],"snap_trim_queue_len":0,"num_snap_trimming":0,"num_shards_repaired":0,"op_queue_age_hist":{"histogram":[],"upper_bound":1},"perf_stat":{"commit_latency_ms":0,"apply_latency_ms":0,"commit_latency_ns":0,"apply_latency_ns":0},"alerts":[]}],"pool_statfs":[{"poolid":1,"osd":0,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":1,"total":0,"available":0,"internally_reserved":0,"allocated":0,"data_stored":0,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":3,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0},{"poolid":1,"osd":7,"total":0,"available":0,"internally_reserved":0,"allocated":593920,"data_stored":590368,"data_compressed":0,"data_compressed_allocated":0,"data_compressed_original":0,"omap_allocated":0,"internal_metadata":0}]}} 2026-04-15T13:35:12.430 INFO:tasks.cephadm.ceph_manager.ceph:clean! 
2026-04-15T13:35:12.430 INFO:tasks.ceph:Waiting until ceph cluster ceph is healthy... 2026-04-15T13:35:12.430 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy 2026-04-15T13:35:12.430 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph health --format=json 2026-04-15T13:35:12.687 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:13.075 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:35:13.075 INFO:teuthology.orchestra.run.vm06.stdout:{"status":"HEALTH_OK","checks":{},"mutes":[]} 2026-04-15T13:35:13.138 INFO:tasks.cephadm.ceph_manager.ceph:wait_until_healthy done 2026-04-15T13:35:13.138 INFO:tasks.cephadm:Setup complete, yielding 2026-04-15T13:35:13.138 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-04-15T13:35:13.140 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm06.local 2026-04-15T13:35:13.140 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- bash -c 'ceph orch status' 2026-04-15T13:35:13.404 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:13.455 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:13 vm06 bash[28114]: audit 2026-04-15T13:35:11.645872+0000 mgr.vm06.qbbldl (mgr.14229) 93 : audit [DBG] from='client.14552 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:13.455 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:13 vm06 bash[28114]: audit 2026-04-15T13:35:11.645872+0000 mgr.vm06.qbbldl (mgr.14229) 93 : audit [DBG] from='client.14552 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:13.455 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:13 vm06 bash[28114]: cluster 2026-04-15T13:35:12.170966+0000 mgr.vm06.qbbldl (mgr.14229) 94 : cluster [DBG] pgmap v44: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:13.455 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:13 vm06 bash[28114]: cluster 2026-04-15T13:35:12.170966+0000 mgr.vm06.qbbldl (mgr.14229) 94 : cluster [DBG] pgmap v44: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:13.455 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:13 vm06 bash[28114]: audit 2026-04-15T13:35:12.357655+0000 mgr.vm06.qbbldl (mgr.14229) 95 : audit [DBG] from='client.14556 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:13.455 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:13 vm06 bash[28114]: audit 2026-04-15T13:35:12.357655+0000 mgr.vm06.qbbldl (mgr.14229) 95 : audit [DBG] from='client.14556 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:13.455 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:13 vm06 bash[28114]: audit 2026-04-15T13:35:13.075889+0000 mon.vm06 (mon.0) 668 : audit [DBG] from='client.? 
192.168.123.106:0/3809435059' entity='client.admin' cmd={"prefix": "health", "format": "json"} : dispatch 2026-04-15T13:35:13.455 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:13 vm06 bash[28114]: audit 2026-04-15T13:35:13.075889+0000 mon.vm06 (mon.0) 668 : audit [DBG] from='client.? 192.168.123.106:0/3809435059' entity='client.admin' cmd={"prefix": "health", "format": "json"} : dispatch 2026-04-15T13:35:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:13 vm09 bash[34466]: audit 2026-04-15T13:35:11.645872+0000 mgr.vm06.qbbldl (mgr.14229) 93 : audit [DBG] from='client.14552 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:13 vm09 bash[34466]: audit 2026-04-15T13:35:11.645872+0000 mgr.vm06.qbbldl (mgr.14229) 93 : audit [DBG] from='client.14552 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:13 vm09 bash[34466]: cluster 2026-04-15T13:35:12.170966+0000 mgr.vm06.qbbldl (mgr.14229) 94 : cluster [DBG] pgmap v44: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:13 vm09 bash[34466]: cluster 2026-04-15T13:35:12.170966+0000 mgr.vm06.qbbldl (mgr.14229) 94 : cluster [DBG] pgmap v44: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:13 vm09 bash[34466]: audit 2026-04-15T13:35:12.357655+0000 mgr.vm06.qbbldl (mgr.14229) 95 : audit [DBG] from='client.14556 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:13 vm09 bash[34466]: audit 2026-04-15T13:35:12.357655+0000 mgr.vm06.qbbldl (mgr.14229) 95 : audit [DBG] from='client.14556 -' entity='client.admin' cmd=[{"prefix": "pg dump", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:13 vm09 bash[34466]: audit 2026-04-15T13:35:13.075889+0000 mon.vm06 (mon.0) 668 : audit [DBG] from='client.? 192.168.123.106:0/3809435059' entity='client.admin' cmd={"prefix": "health", "format": "json"} : dispatch 2026-04-15T13:35:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:13 vm09 bash[34466]: audit 2026-04-15T13:35:13.075889+0000 mon.vm06 (mon.0) 668 : audit [DBG] from='client.? 
192.168.123.106:0/3809435059' entity='client.admin' cmd={"prefix": "health", "format": "json"} : dispatch 2026-04-15T13:35:13.801 INFO:teuthology.orchestra.run.vm06.stdout:Backend: cephadm 2026-04-15T13:35:13.801 INFO:teuthology.orchestra.run.vm06.stdout:Available: Yes 2026-04-15T13:35:13.801 INFO:teuthology.orchestra.run.vm06.stdout:Paused: No 2026-04-15T13:35:13.868 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- bash -c 'ceph orch ps' 2026-04-15T13:35:14.154 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:14.562 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:35:14.563 INFO:teuthology.orchestra.run.vm06.stdout:alertmanager.vm06 vm06 *:9093,9094 running (45s) 12s ago 90s 14.7M - 0.28.1 27c475db5fb1 26e3c2dc7d16 2026-04-15T13:35:14.563 INFO:teuthology.orchestra.run.vm06.stdout:ceph-exporter.vm06 vm06 *:9926 running (96s) 12s ago 96s 8059k - 20.2.0-19-g7ec4401a095 b4cb326006c0 b74aec347cb5 2026-04-15T13:35:14.563 INFO:teuthology.orchestra.run.vm06.stdout:ceph-exporter.vm09 vm09 *:9926 running (58s) 13s ago 58s 8271k - 20.2.0-19-g7ec4401a095 b4cb326006c0 f4fa3cdfd9ed 2026-04-15T13:35:14.563 INFO:teuthology.orchestra.run.vm06.stdout:crash.vm06 vm06 running (95s) 12s ago 95s 10.7M - 20.2.0-19-g7ec4401a095 b4cb326006c0 2a2696a315a2 2026-04-15T13:35:14.563 INFO:teuthology.orchestra.run.vm06.stdout:crash.vm09 vm09 running (57s) 13s ago 57s 11.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c073efaeb27a 2026-04-15T13:35:14.563 INFO:teuthology.orchestra.run.vm06.stdout:grafana.vm06 vm06 *:3000 running (46s) 12s ago 81s 127M - 12.2.0 74144189b384 38104cd71d42 2026-04-15T13:35:14.563 INFO:teuthology.orchestra.run.vm06.stdout:mgr.vm06.qbbldl vm06 *:9283,8765,8443 running (2m) 12s ago 2m 527M - 20.2.0-19-g7ec4401a095 b4cb326006c0 afc83296061d 2026-04-15T13:35:14.563 INFO:teuthology.orchestra.run.vm06.stdout:mgr.vm09.kpawde vm09 *:8443,9283,8765 running (56s) 13s ago 56s 469M - 20.2.0-19-g7ec4401a095 b4cb326006c0 9c10fb3b60f6 2026-04-15T13:35:14.563 INFO:teuthology.orchestra.run.vm06.stdout:mon.vm06 vm06 running (2m) 12s ago 2m 47.5M 2048M 20.2.0-19-g7ec4401a095 b4cb326006c0 713ee534aa80 2026-04-15T13:35:14.563 INFO:teuthology.orchestra.run.vm06.stdout:mon.vm09 vm09 running (55s) 13s ago 55s 43.5M 2048M 20.2.0-19-g7ec4401a095 b4cb326006c0 e5c118d71075 2026-04-15T13:35:14.563 INFO:teuthology.orchestra.run.vm06.stdout:node-exporter.vm06 vm06 *:9100 running (92s) 12s ago 94s 9335k - 1.9.1 d00a542e409e b00e05757d31 2026-04-15T13:35:14.563 INFO:teuthology.orchestra.run.vm06.stdout:node-exporter.vm09 vm09 *:9100 running (54s) 13s ago 56s 7476k - 1.9.1 d00a542e409e 632df45ce8d1 2026-04-15T13:35:14.563 INFO:teuthology.orchestra.run.vm06.stdout:osd.0 vm09 running (24s) 13s ago 26s 30.2M 4096M 20.2.0-19-g7ec4401a095 b4cb326006c0 b5fa6b7c6859 2026-04-15T13:35:14.563 INFO:teuthology.orchestra.run.vm06.stdout:osd.1 vm06 running (23s) 12s ago 26s 38.7M 4096M 20.2.0-19-g7ec4401a095 b4cb326006c0 9b7af0855b1c 2026-04-15T13:35:14.563 INFO:teuthology.orchestra.run.vm06.stdout:osd.2 vm09 running (22s) 13s ago 25s 36.6M 4096M 20.2.0-19-g7ec4401a095 b4cb326006c0 d71897b20df0 2026-04-15T13:35:14.563 
INFO:teuthology.orchestra.run.vm06.stdout:osd.3 vm06 running (22s) 12s ago 24s 38.1M 4096M 20.2.0-19-g7ec4401a095 b4cb326006c0 97f5165630d1 2026-04-15T13:35:14.563 INFO:teuthology.orchestra.run.vm06.stdout:osd.4 vm09 running (20s) 13s ago 23s 27.8M 4096M 20.2.0-19-g7ec4401a095 b4cb326006c0 5d8caca6897f 2026-04-15T13:35:14.563 INFO:teuthology.orchestra.run.vm06.stdout:osd.5 vm06 running (20s) 12s ago 23s 39.5M 4096M 20.2.0-19-g7ec4401a095 b4cb326006c0 16132db1706e 2026-04-15T13:35:14.563 INFO:teuthology.orchestra.run.vm06.stdout:osd.6 vm09 running (19s) 13s ago 21s 27.8M 4096M 20.2.0-19-g7ec4401a095 b4cb326006c0 2f5d7954c0d4 2026-04-15T13:35:14.563 INFO:teuthology.orchestra.run.vm06.stdout:osd.7 vm06 running (18s) 12s ago 21s 24.9M 4096M 20.2.0-19-g7ec4401a095 b4cb326006c0 78508029aef6 2026-04-15T13:35:14.563 INFO:teuthology.orchestra.run.vm06.stdout:prometheus.vm06 vm06 *:9095 running (48s) 12s ago 75s 29.9M - 3.6.0 76947e7ef22f c6d18224ce05 2026-04-15T13:35:14.632 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- bash -c 'ceph orch ls' 2026-04-15T13:35:14.893 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:15.261 INFO:teuthology.orchestra.run.vm06.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT 2026-04-15T13:35:15.262 INFO:teuthology.orchestra.run.vm06.stdout:alertmanager ?:9093,9094 1/1 13s ago 116s count:1 2026-04-15T13:35:15.262 INFO:teuthology.orchestra.run.vm06.stdout:ceph-exporter ?:9926 2/2 14s ago 117s * 2026-04-15T13:35:15.262 INFO:teuthology.orchestra.run.vm06.stdout:crash 2/2 14s ago 118s * 2026-04-15T13:35:15.262 INFO:teuthology.orchestra.run.vm06.stdout:grafana ?:3000 1/1 13s ago 116s count:1 2026-04-15T13:35:15.262 INFO:teuthology.orchestra.run.vm06.stdout:mgr 2/2 14s ago 118s count:2 2026-04-15T13:35:15.262 INFO:teuthology.orchestra.run.vm06.stdout:mon 2/2 14s ago 92s vm06:192.168.123.106=vm06;vm09:192.168.123.109=vm09;count:2 2026-04-15T13:35:15.262 INFO:teuthology.orchestra.run.vm06.stdout:node-exporter ?:9100 2/2 14s ago 116s * 2026-04-15T13:35:15.262 INFO:teuthology.orchestra.run.vm06.stdout:osd.all-available-devices 8 14s ago 45s * 2026-04-15T13:35:15.262 INFO:teuthology.orchestra.run.vm06.stdout:prometheus ?:9095 1/1 13s ago 117s count:1 2026-04-15T13:35:15.336 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- bash -c 'ceph orch host ls' 2026-04-15T13:35:15.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:15 vm06 bash[28114]: audit 2026-04-15T13:35:13.801786+0000 mgr.vm06.qbbldl (mgr.14229) 96 : audit [DBG] from='client.14564 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:15.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:15 vm06 bash[28114]: audit 2026-04-15T13:35:13.801786+0000 mgr.vm06.qbbldl (mgr.14229) 96 : audit [DBG] from='client.14564 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:15.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:15 vm06 bash[28114]: cluster 
2026-04-15T13:35:14.171418+0000 mgr.vm06.qbbldl (mgr.14229) 97 : cluster [DBG] pgmap v45: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:15.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:15 vm06 bash[28114]: cluster 2026-04-15T13:35:14.171418+0000 mgr.vm06.qbbldl (mgr.14229) 97 : cluster [DBG] pgmap v45: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:15.595 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:15.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:15 vm09 bash[34466]: audit 2026-04-15T13:35:13.801786+0000 mgr.vm06.qbbldl (mgr.14229) 96 : audit [DBG] from='client.14564 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:15.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:15 vm09 bash[34466]: audit 2026-04-15T13:35:13.801786+0000 mgr.vm06.qbbldl (mgr.14229) 96 : audit [DBG] from='client.14564 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:15.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:15 vm09 bash[34466]: cluster 2026-04-15T13:35:14.171418+0000 mgr.vm06.qbbldl (mgr.14229) 97 : cluster [DBG] pgmap v45: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:15.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:15 vm09 bash[34466]: cluster 2026-04-15T13:35:14.171418+0000 mgr.vm06.qbbldl (mgr.14229) 97 : cluster [DBG] pgmap v45: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:15.965 INFO:teuthology.orchestra.run.vm06.stdout:HOST ADDR LABELS STATUS 2026-04-15T13:35:15.965 INFO:teuthology.orchestra.run.vm06.stdout:vm06 192.168.123.106 2026-04-15T13:35:15.965 INFO:teuthology.orchestra.run.vm06.stdout:vm09 192.168.123.109 2026-04-15T13:35:15.965 INFO:teuthology.orchestra.run.vm06.stdout:2 hosts in cluster 2026-04-15T13:35:16.066 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- bash -c 'ceph orch device ls' 2026-04-15T13:35:16.326 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:16.344 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:16 vm06 bash[28114]: audit 2026-04-15T13:35:14.558874+0000 mgr.vm06.qbbldl (mgr.14229) 98 : audit [DBG] from='client.14568 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:16.344 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:16 vm06 bash[28114]: audit 2026-04-15T13:35:14.558874+0000 mgr.vm06.qbbldl (mgr.14229) 98 : audit [DBG] from='client.14568 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:16.344 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:16 vm06 bash[28114]: audit 2026-04-15T13:35:15.260219+0000 mgr.vm06.qbbldl (mgr.14229) 99 : audit [DBG] from='client.14570 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:16.344 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:16 vm06 bash[28114]: audit 
2026-04-15T13:35:15.260219+0000 mgr.vm06.qbbldl (mgr.14229) 99 : audit [DBG] from='client.14570 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:16.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:16 vm09 bash[34466]: audit 2026-04-15T13:35:14.558874+0000 mgr.vm06.qbbldl (mgr.14229) 98 : audit [DBG] from='client.14568 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:16.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:16 vm09 bash[34466]: audit 2026-04-15T13:35:14.558874+0000 mgr.vm06.qbbldl (mgr.14229) 98 : audit [DBG] from='client.14568 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:16.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:16 vm09 bash[34466]: audit 2026-04-15T13:35:15.260219+0000 mgr.vm06.qbbldl (mgr.14229) 99 : audit [DBG] from='client.14570 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:16.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:16 vm09 bash[34466]: audit 2026-04-15T13:35:15.260219+0000 mgr.vm06.qbbldl (mgr.14229) 99 : audit [DBG] from='client.14570 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:16.701 INFO:teuthology.orchestra.run.vm06.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS 2026-04-15T13:35:16.701 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/nvme0n1 ssd Linux_c12cb9df1acb8205e4f3 19.9G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:16.701 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/nvme1n1 ssd Linux_f4056127debabf608d65 19.9G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:16.701 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/nvme2n1 ssd Linux_c827407f82fbaf83f886 19.9G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:16.701 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/nvme3n1 ssd Linux_81c0ecdf70e1ab991990 19.9G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:16.701 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 12s ago Has a FileSystem, Insufficient space (<5GB) 2026-04-15T13:35:16.701 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vdb hdd DWNBRSTVMM06001 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:16.701 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vdc hdd DWNBRSTVMM06002 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:16.702 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vdd hdd DWNBRSTVMM06003 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:16.702 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vde hdd DWNBRSTVMM06004 20.0G No 12s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:16.702 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/nvme0n1 ssd Linux_b804cfa419b2e6ee38b2 19.9G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:16.702 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/nvme1n1 ssd Linux_d32cd802138d477b0c27 19.9G No 13s 
ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:16.702 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/nvme2n1 ssd Linux_1b2c890b6b49bb6c8abc 19.9G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:16.702 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/nvme3n1 ssd Linux_e1183b90f8221b5791e4 19.9G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:16.702 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 13s ago Has a FileSystem, Insufficient space (<5GB) 2026-04-15T13:35:16.702 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/vdb hdd DWNBRSTVMM09001 20.0G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:16.702 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/vdc hdd DWNBRSTVMM09002 20.0G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:16.702 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/vdd hdd DWNBRSTVMM09003 20.0G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:16.702 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/vde hdd DWNBRSTVMM09004 20.0G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:16.766 INFO:teuthology.run_tasks:Running task vip... 2026-04-15T13:35:16.769 INFO:tasks.vip:Allocating static IPs for each host... 2026-04-15T13:35:16.769 INFO:tasks.vip:peername 192.168.123.106 2026-04-15T13:35:16.769 INFO:tasks.vip:192.168.123.106 in 192.168.123.0/24, pos 105 2026-04-15T13:35:16.769 INFO:tasks.vip:vm06.local static 12.12.0.106, vnet 12.12.0.0/22 2026-04-15T13:35:16.769 INFO:tasks.vip:VIPs are [IPv4Address('12.12.1.106')] 2026-04-15T13:35:16.769 DEBUG:teuthology.orchestra.run.vm06:> sudo ip route ls 2026-04-15T13:35:16.778 INFO:teuthology.orchestra.run.vm06.stdout:default via 192.168.123.1 dev ens3 proto dhcp src 192.168.123.106 metric 100 2026-04-15T13:35:16.778 INFO:teuthology.orchestra.run.vm06.stdout:172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown 2026-04-15T13:35:16.778 INFO:teuthology.orchestra.run.vm06.stdout:192.168.123.0/24 dev ens3 proto kernel scope link src 192.168.123.106 metric 100 2026-04-15T13:35:16.778 INFO:teuthology.orchestra.run.vm06.stdout:192.168.123.1 dev ens3 proto dhcp scope link src 192.168.123.106 metric 100 2026-04-15T13:35:16.778 INFO:tasks.vip:Configuring 12.12.0.106 on vm06.local iface ens3... 
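
The arithmetic in the vip records above maps each node's DHCP address into the test's virtual network: the host's position inside 192.168.123.0/24 fixes its static address in 12.12.0.0/22, and the VIP handed to the ingress spec later is that static address bumped into the next /24 of the same /22. A short reconstruction of that mapping with ipaddress; this is one consistent reading of the logged numbers, not necessarily the task's exact implementation:

    import ipaddress

    subnet = ipaddress.ip_network('192.168.123.0/24')   # node network
    vnet = ipaddress.ip_network('12.12.0.0/22')         # logged vnet

    def static_ip(peer):
        peer = ipaddress.ip_address(peer)
        # 192.168.123.106 -> "pos 105" in the log
        pos = int(peer) - int(subnet.network_address) - 1
        return vnet.network_address + pos + 1            # -> 12.12.0.106

    def vip(peer):
        # Logged VIP 12.12.1.106: the static address shifted one /24 up.
        return static_ip(peer) + 256

    assert str(static_ip('192.168.123.106')) == '12.12.0.106'
    assert str(vip('192.168.123.106')) == '12.12.1.106'
    assert str(static_ip('192.168.123.109')) == '12.12.0.109'

The `sudo ip addr add .../22` commands that follow only plumb the static addresses onto ens3; the VIP itself is left for the ingress service (keepalived) to claim once it is deployed.
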
2026-04-15T13:35:16.779 DEBUG:teuthology.orchestra.run.vm06:> sudo ip addr add 12.12.0.106/22 dev ens3 2026-04-15T13:35:16.831 INFO:tasks.vip:peername 192.168.123.109 2026-04-15T13:35:16.832 INFO:tasks.vip:192.168.123.109 in 192.168.123.0/24, pos 108 2026-04-15T13:35:16.832 INFO:tasks.vip:vm09.local static 12.12.0.109, vnet 12.12.0.0/22 2026-04-15T13:35:16.832 DEBUG:teuthology.orchestra.run.vm09:> sudo ip route ls 2026-04-15T13:35:16.840 INFO:teuthology.orchestra.run.vm09.stdout:default via 192.168.123.1 dev ens3 proto dhcp src 192.168.123.109 metric 100 2026-04-15T13:35:16.840 INFO:teuthology.orchestra.run.vm09.stdout:172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown 2026-04-15T13:35:16.840 INFO:teuthology.orchestra.run.vm09.stdout:192.168.123.0/24 dev ens3 proto kernel scope link src 192.168.123.109 metric 100 2026-04-15T13:35:16.840 INFO:teuthology.orchestra.run.vm09.stdout:192.168.123.1 dev ens3 proto dhcp scope link src 192.168.123.109 metric 100 2026-04-15T13:35:16.841 INFO:tasks.vip:Configuring 12.12.0.109 on vm09.local iface ens3... 2026-04-15T13:35:16.841 DEBUG:teuthology.orchestra.run.vm09:> sudo ip addr add 12.12.0.109/22 dev ens3 2026-04-15T13:35:16.888 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-04-15T13:35:16.890 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm06.local 2026-04-15T13:35:16.890 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- bash -c 'ceph orch device ls --refresh' 2026-04-15T13:35:17.160 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:17 vm06 bash[28114]: audit 2026-04-15T13:35:15.965418+0000 mgr.vm06.qbbldl (mgr.14229) 100 : audit [DBG] from='client.14574 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:17 vm06 bash[28114]: audit 2026-04-15T13:35:15.965418+0000 mgr.vm06.qbbldl (mgr.14229) 100 : audit [DBG] from='client.14574 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:17 vm06 bash[28114]: cluster 2026-04-15T13:35:16.171898+0000 mgr.vm06.qbbldl (mgr.14229) 101 : cluster [DBG] pgmap v46: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:17.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:17 vm06 bash[28114]: cluster 2026-04-15T13:35:16.171898+0000 mgr.vm06.qbbldl (mgr.14229) 101 : cluster [DBG] pgmap v46: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:17.549 INFO:teuthology.orchestra.run.vm06.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS 2026-04-15T13:35:17.549 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/nvme0n1 ssd Linux_c12cb9df1acb8205e4f3 19.9G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:17.549 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/nvme1n1 ssd Linux_f4056127debabf608d65 19.9G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 
2026-04-15T13:35:17.549 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/nvme2n1 ssd Linux_c827407f82fbaf83f886 19.9G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:17.549 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/nvme3n1 ssd Linux_81c0ecdf70e1ab991990 19.9G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:17.549 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 13s ago Has a FileSystem, Insufficient space (<5GB) 2026-04-15T13:35:17.549 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vdb hdd DWNBRSTVMM06001 20.0G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:17.549 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vdc hdd DWNBRSTVMM06002 20.0G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:17.549 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vdd hdd DWNBRSTVMM06003 20.0G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:17.549 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vde hdd DWNBRSTVMM06004 20.0G No 13s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:17.549 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/nvme0n1 ssd Linux_b804cfa419b2e6ee38b2 19.9G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:17.549 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/nvme1n1 ssd Linux_d32cd802138d477b0c27 19.9G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:17.549 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/nvme2n1 ssd Linux_1b2c890b6b49bb6c8abc 19.9G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:17.549 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/nvme3n1 ssd Linux_e1183b90f8221b5791e4 19.9G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:17.549 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 14s ago Has a FileSystem, Insufficient space (<5GB) 2026-04-15T13:35:17.549 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/vdb hdd DWNBRSTVMM09001 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:17.549 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/vdc hdd DWNBRSTVMM09002 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:17.549 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/vdd hdd DWNBRSTVMM09003 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:17.549 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/vde hdd DWNBRSTVMM09004 20.0G No 14s ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:35:17.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:17 vm09 bash[34466]: audit 2026-04-15T13:35:15.965418+0000 mgr.vm06.qbbldl (mgr.14229) 100 : audit [DBG] from='client.14574 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:17.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:17 vm09 bash[34466]: audit 2026-04-15T13:35:15.965418+0000 mgr.vm06.qbbldl (mgr.14229) 100 : audit [DBG] 
from='client.14574 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:17.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:17 vm09 bash[34466]: cluster 2026-04-15T13:35:16.171898+0000 mgr.vm06.qbbldl (mgr.14229) 101 : cluster [DBG] pgmap v46: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:17.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:17 vm09 bash[34466]: cluster 2026-04-15T13:35:16.171898+0000 mgr.vm06.qbbldl (mgr.14229) 101 : cluster [DBG] pgmap v46: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:17.678 INFO:teuthology.run_tasks:Running task cephadm.apply... 2026-04-15T13:35:17.682 INFO:tasks.cephadm:Applying spec(s): placement: count: 4 host_pattern: '*' service_id: foo service_type: rgw spec: rgw_frontend_port: 8000 --- placement: count: 2 service_id: rgw.foo service_type: ingress spec: backend_service: rgw.foo frontend_port: 9000 monitor_port: 9001 virtual_ip: 12.12.1.106/22 2026-04-15T13:35:17.682 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch apply -i - 2026-04-15T13:35:17.994 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:18.418 INFO:teuthology.orchestra.run.vm06.stdout:Scheduled rgw.foo update... 2026-04-15T13:35:18.418 INFO:teuthology.orchestra.run.vm06.stdout:Scheduled ingress.rgw.foo update... 2026-04-15T13:35:18.497 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-04-15T13:35:18.500 INFO:tasks.cephadm:Waiting for ceph service rgw.foo to start (timeout 300)... 
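
cephadm.apply above pipes both specs as YAML into `ceph orch apply -i -` on stdin, and cephadm.wait_for_service then polls `ceph orch ls -f json` until the named service reports all of its daemons running, which is why the records below count up from "rgw.foo has 0/4". A sketch of that polling loop under the same assumptions as the earlier sketch (reusing its hypothetical shell() helper and imports); the JSON fields match the `orch ls -f json` output dumped below:

    def wait_for_service(name, timeout=300):
        # Poll `ceph orch ls -f json` until status.running == status.size
        # for the named service (cf. the "rgw.foo has 0/4" lines below).
        end = time.time() + timeout
        while time.time() < end:
            for svc in json.loads(shell('ceph orch ls -f json')):
                if svc['service_name'] == name:
                    status = svc['status']
                    print('%s has %s/%s' % (name, status['running'], status['size']))
                    if status['size'] > 0 and status['running'] == status['size']:
                        return
            time.sleep(1)
        raise TimeoutError('%s did not start within %ss' % (name, timeout))
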
2026-04-15T13:35:18.500 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json 2026-04-15T13:35:18.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:18 vm09 bash[34466]: audit 2026-04-15T13:35:16.700826+0000 mgr.vm06.qbbldl (mgr.14229) 102 : audit [DBG] from='client.14578 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:18.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:18 vm09 bash[34466]: audit 2026-04-15T13:35:16.700826+0000 mgr.vm06.qbbldl (mgr.14229) 102 : audit [DBG] from='client.14578 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:18.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:18 vm09 bash[34466]: audit 2026-04-15T13:35:17.549631+0000 mon.vm06 (mon.0) 669 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:35:18.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:18 vm09 bash[34466]: audit 2026-04-15T13:35:17.549631+0000 mon.vm06 (mon.0) 669 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:35:18.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:18 vm06 bash[28114]: audit 2026-04-15T13:35:16.700826+0000 mgr.vm06.qbbldl (mgr.14229) 102 : audit [DBG] from='client.14578 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:18.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:18 vm06 bash[28114]: audit 2026-04-15T13:35:16.700826+0000 mgr.vm06.qbbldl (mgr.14229) 102 : audit [DBG] from='client.14578 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:18.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:18 vm06 bash[28114]: audit 2026-04-15T13:35:17.549631+0000 mon.vm06 (mon.0) 669 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:35:18.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:18 vm06 bash[28114]: audit 2026-04-15T13:35:17.549631+0000 mon.vm06 (mon.0) 669 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:35:18.780 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:19.155 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:35:19.156 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:01.823276Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": 
"2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:01.120707Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:01.120470Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:01.823176Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:18.418610Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:01.120597Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:01.120738Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:01.120504Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:01.120382Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T13:33:17.901263Z", "last_refresh": "2026-04-15T13:35:01.822773Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:18.414258Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T13:35:18.409636Z", "ports": [8000], "running": 0, "size": 4}}] 2026-04-15T13:35:19.220 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-15T13:35:19.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:19 vm06 bash[28114]: audit 
2026-04-15T13:35:17.547367+0000 mgr.vm06.qbbldl (mgr.14229) 103 : audit [DBG] from='client.14582 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:19.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:19 vm06 bash[28114]: audit 2026-04-15T13:35:17.547367+0000 mgr.vm06.qbbldl (mgr.14229) 103 : audit [DBG] from='client.14582 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:19.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:19 vm06 bash[28114]: cluster 2026-04-15T13:35:18.172195+0000 mgr.vm06.qbbldl (mgr.14229) 104 : cluster [DBG] pgmap v47: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:19.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:19 vm06 bash[28114]: cluster 2026-04-15T13:35:18.172195+0000 mgr.vm06.qbbldl (mgr.14229) 104 : cluster [DBG] pgmap v47: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:19.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:19 vm06 bash[28114]: audit 2026-04-15T13:35:18.413998+0000 mon.vm06 (mon.0) 670 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:19.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:19 vm06 bash[28114]: audit 2026-04-15T13:35:18.413998+0000 mon.vm06 (mon.0) 670 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:19.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:19 vm06 bash[28114]: audit 2026-04-15T13:35:18.418389+0000 mon.vm06 (mon.0) 671 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:19.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:19 vm06 bash[28114]: audit 2026-04-15T13:35:18.418389+0000 mon.vm06 (mon.0) 671 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:19.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:19 vm09 bash[34466]: audit 2026-04-15T13:35:17.547367+0000 mgr.vm06.qbbldl (mgr.14229) 103 : audit [DBG] from='client.14582 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:19.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:19 vm09 bash[34466]: audit 2026-04-15T13:35:17.547367+0000 mgr.vm06.qbbldl (mgr.14229) 103 : audit [DBG] from='client.14582 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "refresh": true, "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:19.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:19 vm09 bash[34466]: cluster 2026-04-15T13:35:18.172195+0000 mgr.vm06.qbbldl (mgr.14229) 104 : cluster [DBG] pgmap v47: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:19.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:19 vm09 bash[34466]: cluster 2026-04-15T13:35:18.172195+0000 mgr.vm06.qbbldl (mgr.14229) 104 : cluster [DBG] pgmap v47: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:35:19.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:19 vm09 bash[34466]: audit 2026-04-15T13:35:18.413998+0000 mon.vm06 (mon.0) 670 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:19.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 
13:35:19 vm09 bash[34466]: audit 2026-04-15T13:35:18.413998+0000 mon.vm06 (mon.0) 670 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:19.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:19 vm09 bash[34466]: audit 2026-04-15T13:35:18.418389+0000 mon.vm06 (mon.0) 671 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:20.221 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json
2026-04-15T13:35:20.498 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:20.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:20 vm06 bash[28114]: audit 2026-04-15T13:35:18.406763+0000 mgr.vm06.qbbldl (mgr.14229) 105 : audit [DBG] from='client.14586 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:35:20.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:20 vm06 bash[28114]: cephadm 2026-04-15T13:35:18.409619+0000 mgr.vm06.qbbldl (mgr.14229) 106 : cephadm [INF] Saving service rgw.foo spec with placement count:4;*
2026-04-15T13:35:20.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:20 vm06 bash[28114]: cephadm 2026-04-15T13:35:18.414545+0000 mgr.vm06.qbbldl (mgr.14229) 107 : cephadm [INF] Saving service ingress.rgw.foo spec with placement count:2
2026-04-15T13:35:20.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:20 vm06 bash[28114]: audit 2026-04-15T13:35:19.154501+0000 mgr.vm06.qbbldl (mgr.14229) 108 : audit [DBG] from='client.14590 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:20.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:20 vm09 bash[34466]: audit 2026-04-15T13:35:18.406763+0000 mgr.vm06.qbbldl (mgr.14229) 105 : audit [DBG] from='client.14586 -' entity='client.admin' cmd=[{"prefix": "orch apply", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:35:20.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:20 vm09 bash[34466]: cephadm 2026-04-15T13:35:18.409619+0000 mgr.vm06.qbbldl (mgr.14229) 106 : cephadm [INF] Saving service rgw.foo spec with placement count:4;*
2026-04-15T13:35:20.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:20 vm09 bash[34466]: cephadm 2026-04-15T13:35:18.414545+0000 mgr.vm06.qbbldl (mgr.14229) 107 : cephadm [INF] Saving service ingress.rgw.foo spec with placement count:2
2026-04-15T13:35:20.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:20 vm09 bash[34466]: audit 2026-04-15T13:35:19.154501+0000 mgr.vm06.qbbldl (mgr.14229) 108 : audit [DBG] from='client.14590 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:20.874 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:35:20.874 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:01.823276Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:01.120707Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:01.120470Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:01.823176Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:18.418610Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:01.120597Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:01.120738Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:01.120504Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:01.120382Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T13:33:17.901263Z", "last_refresh": "2026-04-15T13:35:01.822773Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:18.414258Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T13:35:18.409636Z", "ports": [8000], "running": 0, "size": 4}}]
2026-04-15T13:35:20.960 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-15T13:35:21.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:21 vm09 bash[34466]: cluster 2026-04-15T13:35:20.172563+0000 mgr.vm06.qbbldl (mgr.14229) 109 : cluster [DBG] pgmap v48: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:35:21.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:21 vm06 bash[28114]: cluster 2026-04-15T13:35:20.172563+0000 mgr.vm06.qbbldl (mgr.14229) 109 : cluster [DBG] pgmap v48: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:35:21.961 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json
2026-04-15T13:35:22.299 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:22.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:22 vm06 bash[28114]: audit 2026-04-15T13:35:20.872525+0000 mgr.vm06.qbbldl (mgr.14229) 110 : audit [DBG] from='client.14594 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:22.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:22 vm06 bash[28114]: audit 2026-04-15T13:35:22.250530+0000 mon.vm06 (mon.0) 672 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:22.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:22 vm06 bash[28114]: audit 2026-04-15T13:35:22.256306+0000 mon.vm06 (mon.0) 673 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:22.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:22 vm09 bash[34466]: audit 2026-04-15T13:35:20.872525+0000 mgr.vm06.qbbldl (mgr.14229) 110 : audit [DBG] from='client.14594 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:22.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:22 vm09 bash[34466]: audit 2026-04-15T13:35:22.250530+0000 mon.vm06 (mon.0) 672 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:22.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:22 vm09 bash[34466]: audit 2026-04-15T13:35:22.256306+0000 mon.vm06 (mon.0) 673 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:22.886 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:35:22.886 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:22.852862Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:22.243225Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:22.242981Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:22.852792Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:18.418610Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:22.243081Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:22.243273Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:22.243030Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:22.242842Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T13:33:17.901263Z", "last_refresh": "2026-04-15T13:35:22.852461Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:18.414258Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T13:35:18.409636Z", "ports": [8000], "running": 0, "size": 4}}]
2026-04-15T13:35:22.960 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-15T13:35:23.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:23 vm09 bash[34466]: cluster 2026-04-15T13:35:22.172938+0000 mgr.vm06.qbbldl (mgr.14229) 111 : cluster [DBG] pgmap v49: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:35:23.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:23 vm09 bash[34466]: audit 2026-04-15T13:35:22.860782+0000 mon.vm06 (mon.0) 674 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:23.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:23 vm09 bash[34466]: audit 2026-04-15T13:35:22.870263+0000 mon.vm06 (mon.0) 675 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:23.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:23 vm09 bash[34466]: audit 2026-04-15T13:35:22.884754+0000 mgr.vm06.qbbldl (mgr.14229) 112 : audit [DBG] from='client.24317 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:23.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:23 vm09 bash[34466]: audit 2026-04-15T13:35:22.942873+0000 mon.vm06 (mon.0) 676 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:23.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:23 vm09 bash[34466]: audit 2026-04-15T13:35:22.948866+0000 mon.vm06 (mon.0) 677 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:23.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:23 vm06 bash[28114]: cluster 2026-04-15T13:35:22.172938+0000 mgr.vm06.qbbldl (mgr.14229) 111 : cluster [DBG] pgmap v49: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:35:23.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:23 vm06 bash[28114]: audit 2026-04-15T13:35:22.860782+0000 mon.vm06 (mon.0) 674 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:23.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:23 vm06 bash[28114]: audit 2026-04-15T13:35:22.870263+0000 mon.vm06 (mon.0) 675 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:23.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:23 vm06 bash[28114]: audit 2026-04-15T13:35:22.884754+0000 mgr.vm06.qbbldl (mgr.14229) 112 : audit [DBG] from='client.24317 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:23.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:23 vm06 bash[28114]: audit 2026-04-15T13:35:22.942873+0000 mon.vm06 (mon.0) 676 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:23.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:23 vm06 bash[28114]: audit 2026-04-15T13:35:22.948866+0000 mon.vm06 (mon.0) 677 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:23.961 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json
2026-04-15T13:35:24.239 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:24.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:24 vm09 bash[34466]: audit 2026-04-15T13:35:23.481047+0000 mon.vm06 (mon.0) 678 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:35:24.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:24 vm09 bash[34466]: audit 2026-04-15T13:35:23.533677+0000 mon.vm06 (mon.0) 679 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:24.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:24 vm09 bash[34466]: audit 2026-04-15T13:35:23.538304+0000 mon.vm06 (mon.0) 680 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:24.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:24 vm09 bash[34466]: cluster 2026-04-15T13:35:24.173206+0000 mgr.vm06.qbbldl (mgr.14229) 113 : cluster [DBG] pgmap v50: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:35:24.617 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:35:24.617 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:22.852862Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:22.243225Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:22.242981Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:22.852792Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:18.418610Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:22.243081Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:22.243273Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:22.243030Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:22.242842Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T13:33:17.901263Z", "last_refresh": "2026-04-15T13:35:22.852461Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:18.414258Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T13:35:18.409636Z", "ports": [8000], "running": 0, "size": 4}}]
2026-04-15T13:35:24.629 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:24 vm06 bash[28114]: audit 2026-04-15T13:35:23.481047+0000 mon.vm06 (mon.0) 678 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:35:24.629 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:24 vm06 bash[28114]: audit 2026-04-15T13:35:23.533677+0000 mon.vm06 (mon.0) 679 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:24.629 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:24 vm06 bash[28114]: audit 2026-04-15T13:35:23.538304+0000 mon.vm06 (mon.0) 680 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:24.629 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:24 vm06 bash[28114]: cluster 2026-04-15T13:35:24.173206+0000 mgr.vm06.qbbldl (mgr.14229) 113 : cluster [DBG] pgmap v50: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:35:24.695 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-15T13:35:25.696 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json
2026-04-15T13:35:25.988 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:26.006 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:25 vm06 bash[28114]: audit 2026-04-15T13:35:24.615965+0000 mgr.vm06.qbbldl (mgr.14229) 114 : audit [DBG] from='client.14600 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:26.006 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:25 vm06 bash[28114]: audit 2026-04-15T13:35:24.828180+0000 mon.vm06 (mon.0) 681 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:26.006 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:25 vm06 bash[28114]: audit 2026-04-15T13:35:24.832254+0000 mon.vm06 (mon.0) 682 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:26.006 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:25 vm06 bash[28114]: audit 2026-04-15T13:35:25.450868+0000 mon.vm06 (mon.0) 683 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:26.006 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:25 vm06 bash[28114]: audit 2026-04-15T13:35:25.456477+0000 mon.vm06 (mon.0) 684 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:26.007 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:25 vm06 bash[28114]: audit 2026-04-15T13:35:25.457356+0000 mon.vm06 (mon.0) 685 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:35:26.007 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:25 vm06 bash[28114]: audit 2026-04-15T13:35:25.457972+0000 mon.vm06 (mon.0) 686 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:35:26.007 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:25 vm06 bash[28114]: audit 2026-04-15T13:35:25.462196+0000 mon.vm06 (mon.0) 687 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:26.007 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:25 vm06 bash[28114]: audit 2026-04-15T13:35:25.463654+0000 mon.vm06 (mon.0) 688 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:35:26.007 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:25 vm06 bash[28114]: audit 2026-04-15T13:35:25.467970+0000 mon.vm06 (mon.0) 689 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.iwshxg", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch
2026-04-15T13:35:26.007 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:25 vm06 bash[28114]: audit 2026-04-15T13:35:25.470399+0000 mon.vm06 (mon.0) 690 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.iwshxg", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-04-15T13:35:26.007 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:25 vm06 bash[28114]: audit 2026-04-15T13:35:25.475216+0000 mon.vm06 (mon.0) 691 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:26.007 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:25 vm06 bash[28114]: audit 2026-04-15T13:35:25.476410+0000 mon.vm06 (mon.0) 692 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:35:26.008 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:25 vm09 bash[34466]: audit 2026-04-15T13:35:24.615965+0000 mgr.vm06.qbbldl (mgr.14229) 114 : audit [DBG] from='client.14600 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:26.008 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:25 vm09 bash[34466]: audit 2026-04-15T13:35:24.828180+0000 mon.vm06 (mon.0) 681 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:26.008 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:25 vm09 bash[34466]: audit 2026-04-15T13:35:24.832254+0000 mon.vm06 (mon.0) 682 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:26.008 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:25 vm09 bash[34466]: audit 2026-04-15T13:35:25.450868+0000 mon.vm06 (mon.0) 683 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:26.008 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:25 vm09 bash[34466]: audit 2026-04-15T13:35:25.456477+0000 mon.vm06 (mon.0) 684 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:26.008 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:25 vm09 bash[34466]: audit 2026-04-15T13:35:25.457356+0000 mon.vm06 (mon.0) 685 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:35:26.009 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:25 vm09 bash[34466]: audit 2026-04-15T13:35:25.457972+0000 mon.vm06 (mon.0) 686 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:35:26.009 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:25 vm09 bash[34466]: audit 2026-04-15T13:35:25.462196+0000 mon.vm06 (mon.0) 687 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:26.009 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:25 vm09 bash[34466]: audit 2026-04-15T13:35:25.463654+0000 mon.vm06 (mon.0) 688 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:35:26.009 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:25 vm09 bash[34466]: audit 2026-04-15T13:35:25.467970+0000 mon.vm06 (mon.0) 689 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.iwshxg", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch
2026-04-15T13:35:26.009 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:25 vm09 bash[34466]: audit 2026-04-15T13:35:25.470399+0000 mon.vm06 (mon.0) 690 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.iwshxg", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-04-15T13:35:26.009 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:25 vm09 bash[34466]: audit 2026-04-15T13:35:25.475216+0000 mon.vm06 (mon.0) 691 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:26.009 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:25 vm09 bash[34466]: audit 2026-04-15T13:35:25.476410+0000 mon.vm06 (mon.0) 692 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:35:26.321 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:26 vm09 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:35:26.399 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:35:26.399 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:22.852862Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:22.243225Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:22.242981Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:22.852792Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:18.418610Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:22.243081Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:22.243273Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:22.243030Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:22.242842Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T13:33:17.901263Z", "last_refresh": "2026-04-15T13:35:22.852461Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:26.361963Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T13:35:18.409636Z", "ports": [8000], "running": 0, "size": 4}}]
2026-04-15T13:35:26.482 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-15T13:35:27.224 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:26 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:35:27.483 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json
2026-04-15T13:35:27.490 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:27 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:35:27.490 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:27 vm06 bash[28114]: cephadm 2026-04-15T13:35:25.477062+0000 mgr.vm06.qbbldl (mgr.14229) 115 : cephadm [INF] Deploying daemon rgw.foo.vm09.iwshxg on vm09
2026-04-15T13:35:27.490 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:27 vm06 bash[28114]: cluster 2026-04-15T13:35:26.173570+0000 mgr.vm06.qbbldl (mgr.14229) 116 : cluster [DBG] pgmap v51: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:35:27.490 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:27 vm06 bash[28114]: audit 2026-04-15T13:35:26.351465+0000 mon.vm06 (mon.0) 693 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:27.491 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:27 vm06 bash[28114]: audit 2026-04-15T13:35:26.356726+0000 mon.vm06 (mon.0) 694 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:27.491 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:27 vm06 bash[28114]: audit 2026-04-15T13:35:26.361695+0000 mon.vm06 (mon.0) 695 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:27.491 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:27 vm06 bash[28114]: audit 2026-04-15T13:35:26.362336+0000 mon.vm06 (mon.0) 696 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.liyzhd", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch
2026-04-15T13:35:27.491 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:27 vm06 bash[28114]: audit 2026-04-15T13:35:26.364606+0000 mon.vm06 (mon.0) 697 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.liyzhd", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-04-15T13:35:27.491 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:27 vm06 bash[28114]: audit 2026-04-15T13:35:26.370131+0000 mon.vm06 (mon.0) 698 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:27.491 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:27 vm06 bash[28114]: audit 2026-04-15T13:35:26.371176+0000 mon.vm06 (mon.0) 699 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:35:27.491 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:27 vm06 bash[28114]: audit 2026-04-15T13:35:26.392866+0000 mon.vm06 (mon.0) 700 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:27.751 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:27 vm09 bash[34466]: cephadm 2026-04-15T13:35:25.477062+0000 mgr.vm06.qbbldl (mgr.14229) 115 : cephadm [INF] Deploying daemon rgw.foo.vm09.iwshxg on vm09
2026-04-15T13:35:27.751 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:27 vm09 bash[34466]: cluster 2026-04-15T13:35:26.173570+0000 mgr.vm06.qbbldl (mgr.14229) 116 : cluster [DBG] pgmap v51: 1 pgs: 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:35:27.751 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:27 vm09 bash[34466]: audit 2026-04-15T13:35:26.351465+0000 mon.vm06 (mon.0) 693 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:27.751 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:27 vm09 bash[34466]: audit 2026-04-15T13:35:26.356726+0000 mon.vm06 (mon.0) 694 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:27.751 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:27 vm09 bash[34466]: audit 2026-04-15T13:35:26.361695+0000 mon.vm06 (mon.0) 695 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:27.751 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:27 vm09 bash[34466]: audit 2026-04-15T13:35:26.362336+0000 mon.vm06 (mon.0) 696 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.liyzhd", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch
2026-04-15T13:35:27.751 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:27 vm09 bash[34466]: audit 2026-04-15T13:35:26.364606+0000 mon.vm06 (mon.0) 697 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.liyzhd", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-04-15T13:35:27.751 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:27 vm09 bash[34466]: audit 2026-04-15T13:35:26.370131+0000 mon.vm06 (mon.0) 698 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:26.371176+0000 mon.vm06 (mon.0) 699 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:35:27.751 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:27 vm09 bash[34466]: audit 2026-04-15T13:35:26.371176+0000 mon.vm06 (mon.0) 699 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:35:27.751 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:27 vm09 bash[34466]: audit 2026-04-15T13:35:26.392866+0000 mon.vm06 (mon.0) 700 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:27.751 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:27 vm09 bash[34466]: audit 2026-04-15T13:35:26.392866+0000 mon.vm06 (mon.0) 700 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:27.752 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:28.150 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:35:28.150 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:22.852862Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:22.243225Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:22.242981Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:22.852792Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:18.418610Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:22.243081Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was 
created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:22.243273Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:22.243030Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:22.242842Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T13:33:17.901263Z", "last_refresh": "2026-04-15T13:35:22.852461Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:27.452762Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T13:35:18.409636Z", "ports": [8000], "running": 0, "size": 4}}] 2026-04-15T13:35:28.222 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-15T13:35:28.328 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 2026-04-15T13:35:28.328 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed. 
2026-04-15T13:35:28.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 bash[28114]: cephadm 2026-04-15T13:35:26.372492+0000 mgr.vm06.qbbldl (mgr.14229) 117 : cephadm [INF] Deploying daemon rgw.foo.vm06.liyzhd on vm06
2026-04-15T13:35:28.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 bash[28114]: audit 2026-04-15T13:35:26.389489+0000 mgr.vm06.qbbldl (mgr.14229) 118 : audit [DBG] from='client.14604 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:28.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 bash[28114]: cluster 2026-04-15T13:35:27.412977+0000 mon.vm06 (mon.0) 701 : cluster [DBG] osdmap e25: 8 total, 8 up, 8 in
2026-04-15T13:35:28.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 bash[28114]: audit 2026-04-15T13:35:27.423752+0000 mon.vm09 (mon.1) 29 : audit [INF] from='client.? 192.168.123.109:0/2999972135' entity='client.rgw.foo.vm09.iwshxg' cmd={"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"} : dispatch
2026-04-15T13:35:28.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 bash[28114]: audit 2026-04-15T13:35:27.437193+0000 mon.vm06 (mon.0) 702 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:28.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 bash[28114]: audit 2026-04-15T13:35:27.445824+0000 mon.vm06 (mon.0) 703 : audit [INF] from='client.? ' entity='client.rgw.foo.vm09.iwshxg' cmd={"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"} : dispatch
2026-04-15T13:35:28.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 bash[28114]: audit 2026-04-15T13:35:27.448590+0000 mon.vm06 (mon.0) 704 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:28.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 bash[28114]: audit 2026-04-15T13:35:27.452539+0000 mon.vm06 (mon.0) 705 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:28.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 bash[28114]: audit 2026-04-15T13:35:27.453072+0000 mon.vm06 (mon.0) 706 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.pxnsqu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch
2026-04-15T13:35:28.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 bash[28114]: audit 2026-04-15T13:35:27.469770+0000 mon.vm06 (mon.0) 707 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.pxnsqu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-04-15T13:35:28.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 bash[28114]: audit 2026-04-15T13:35:27.474237+0000 mon.vm06 (mon.0) 708 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:28.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 bash[28114]: audit 2026-04-15T13:35:27.475834+0000 mon.vm06 (mon.0) 709 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:35:28.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 bash[28114]: cephadm 2026-04-15T13:35:27.476387+0000 mgr.vm06.qbbldl (mgr.14229) 119 : cephadm [INF] Deploying daemon rgw.foo.vm09.pxnsqu on vm09
2026-04-15T13:35:28.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 bash[28114]: audit 2026-04-15T13:35:28.147244+0000 mgr.vm06.qbbldl (mgr.14229) 120 : audit [DBG] from='client.14616 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:28.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 bash[28114]: audit 2026-04-15T13:35:28.148142+0000 mon.vm06 (mon.0) 710 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:28.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 bash[28114]: audit 2026-04-15T13:35:28.148942+0000 mon.vm06 (mon.0) 711 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:28.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 bash[28114]: cluster 2026-04-15T13:35:28.173841+0000 mgr.vm06.qbbldl (mgr.14229) 121 : cluster [DBG] pgmap v53: 33 pgs: 7 creating+peering, 25 unknown, 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
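
Each "Deploying daemon rgw.foo.*" event above is preceded by the mgr minting a keyring for the new daemon; the audit entries show the exact caps. A hand-rolled equivalent of that request, as a sketch (entity name and caps copied from the audit entries; cephadm normally issues this call itself):

    import subprocess

    # Same call the mgr dispatched as "auth get-or-create" in the audit log;
    # illustrative only -- cephadm creates daemon keyrings automatically.
    entity = "client.rgw.foo.vm09.pxnsqu"
    keyring = subprocess.check_output([
        "ceph", "auth", "get-or-create", entity,
        "mon", "allow *",
        "mgr", "allow rw",
        "osd", "allow rwx tag rgw *=*",
    ]).decode()
    print(keyring)
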
2026-04-15T13:35:28.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 bash[28114]: audit 2026-04-15T13:35:28.365289+0000 mon.vm06 (mon.0) 712 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:28.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 bash[28114]: audit 2026-04-15T13:35:28.369693+0000 mon.vm06 (mon.0) 713 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:28.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 bash[28114]: audit 2026-04-15T13:35:28.375086+0000 mon.vm06 (mon.0) 714 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:28.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 bash[34466]: cephadm 2026-04-15T13:35:26.372492+0000 mgr.vm06.qbbldl (mgr.14229) 117 : cephadm [INF] Deploying daemon rgw.foo.vm06.liyzhd on vm06
2026-04-15T13:35:28.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 bash[34466]: audit 2026-04-15T13:35:26.389489+0000 mgr.vm06.qbbldl (mgr.14229) 118 : audit [DBG] from='client.14604 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:28.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 bash[34466]: cluster 2026-04-15T13:35:27.412977+0000 mon.vm06 (mon.0) 701 : cluster [DBG] osdmap e25: 8 total, 8 up, 8 in
2026-04-15T13:35:28.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 bash[34466]: audit 2026-04-15T13:35:27.423752+0000 mon.vm09 (mon.1) 29 : audit [INF] from='client.? 192.168.123.109:0/2999972135' entity='client.rgw.foo.vm09.iwshxg' cmd={"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"} : dispatch
2026-04-15T13:35:28.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 bash[34466]: audit 2026-04-15T13:35:27.437193+0000 mon.vm06 (mon.0) 702 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:28.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 bash[34466]: audit 2026-04-15T13:35:27.445824+0000 mon.vm06 (mon.0) 703 : audit [INF] from='client.? ' entity='client.rgw.foo.vm09.iwshxg' cmd={"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"} : dispatch
2026-04-15T13:35:28.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 bash[34466]: audit 2026-04-15T13:35:27.448590+0000 mon.vm06 (mon.0) 704 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:28.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 bash[34466]: audit 2026-04-15T13:35:27.452539+0000 mon.vm06 (mon.0) 705 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:28.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 bash[34466]: audit 2026-04-15T13:35:27.453072+0000 mon.vm06 (mon.0) 706 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.pxnsqu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch
2026-04-15T13:35:28.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 bash[34466]: audit 2026-04-15T13:35:27.469770+0000 mon.vm06 (mon.0) 707 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm09.pxnsqu", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-04-15T13:35:28.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 bash[34466]: audit 2026-04-15T13:35:27.474237+0000 mon.vm06 (mon.0) 708 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:28.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 bash[34466]: audit 2026-04-15T13:35:27.475834+0000 mon.vm06 (mon.0) 709 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:35:28.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 bash[34466]: cephadm 2026-04-15T13:35:27.476387+0000 mgr.vm06.qbbldl (mgr.14229) 119 : cephadm [INF] Deploying daemon rgw.foo.vm09.pxnsqu on vm09
2026-04-15T13:35:28.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 bash[34466]: audit 2026-04-15T13:35:28.147244+0000 mgr.vm06.qbbldl (mgr.14229) 120 : audit [DBG] from='client.14616 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:28.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 bash[34466]: audit 2026-04-15T13:35:28.148142+0000 mon.vm06 (mon.0) 710 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:28.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 bash[34466]: audit 2026-04-15T13:35:28.148942+0000 mon.vm06 (mon.0) 711 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:28.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 bash[34466]: cluster 2026-04-15T13:35:28.173841+0000 mgr.vm06.qbbldl (mgr.14229) 121 : cluster [DBG] pgmap v53: 33 pgs: 7 creating+peering, 25 unknown, 1 active+clean; 577 KiB data, 215 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:35:28.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 bash[34466]: audit 2026-04-15T13:35:28.365289+0000 mon.vm06 (mon.0) 712 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:28.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 bash[34466]: audit 2026-04-15T13:35:28.369693+0000 mon.vm06 (mon.0) 713 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:28.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:28 vm09 bash[34466]: audit 2026-04-15T13:35:28.375086+0000 mon.vm06 (mon.0) 714 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:29.199 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:28 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:35:29.224 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json
2026-04-15T13:35:29.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:29 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:35:29.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:29 vm06 bash[28114]: audit 2026-04-15T13:35:28.376213+0000 mon.vm06 (mon.0) 715 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.landug", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch
2026-04-15T13:35:29.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:29 vm06 bash[28114]: audit 2026-04-15T13:35:28.391208+0000 mon.vm06 (mon.0) 716 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.landug", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-04-15T13:35:29.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:29 vm06 bash[28114]: audit 2026-04-15T13:35:28.394844+0000 mon.vm06 (mon.0) 717 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:29.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:29 vm06 bash[28114]: audit 2026-04-15T13:35:28.395954+0000 mon.vm06 (mon.0) 718 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:35:29.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:29 vm06 bash[28114]: cephadm 2026-04-15T13:35:28.396512+0000 mgr.vm06.qbbldl (mgr.14229) 122 : cephadm [INF] Deploying daemon rgw.foo.vm06.landug on vm06
2026-04-15T13:35:29.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:29 vm06 bash[28114]: audit 2026-04-15T13:35:28.410632+0000 mon.vm06 (mon.0) 719 : audit [INF] from='client.? ' entity='client.rgw.foo.vm09.iwshxg' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished
2026-04-15T13:35:29.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:29 vm06 bash[28114]: cluster 2026-04-15T13:35:28.412351+0000 mon.vm06 (mon.0) 720 : cluster [DBG] osdmap e26: 8 total, 8 up, 8 in
2026-04-15T13:35:29.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:29 vm06 bash[28114]: audit 2026-04-15T13:35:29.347532+0000 mon.vm06 (mon.0) 721 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:29.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:29 vm06 bash[28114]: audit 2026-04-15T13:35:29.352969+0000 mon.vm06 (mon.0) 722 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:29.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:29 vm06 bash[28114]: audit 2026-04-15T13:35:29.356184+0000 mon.vm06 (mon.0) 723 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:29.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:29 vm06 bash[28114]: cephadm 2026-04-15T13:35:29.356596+0000 mgr.vm06.qbbldl (mgr.14229) 123 : cephadm [INF] Saving service rgw.foo spec with placement count:4;*
2026-04-15T13:35:29.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:29 vm06 bash[28114]: audit 2026-04-15T13:35:29.359162+0000 mon.vm06 (mon.0) 724 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:29.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:29 vm06 bash[28114]: audit 2026-04-15T13:35:29.362137+0000 mon.vm06 (mon.0) 725 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:29.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:29 vm06 bash[28114]: audit 2026-04-15T13:35:29.365732+0000 mon.vm06 (mon.0) 726 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:29.525 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:29.669 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:29 vm09 bash[34466]: audit 2026-04-15T13:35:28.376213+0000 mon.vm06 (mon.0) 715 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.landug", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]} : dispatch
2026-04-15T13:35:29.669 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:29 vm09 bash[34466]: audit 2026-04-15T13:35:28.391208+0000 mon.vm06 (mon.0) 716 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "auth get-or-create", "entity": "client.rgw.foo.vm06.landug", "caps": ["mon", "allow *", "mgr", "allow rw", "osd", "allow rwx tag rgw *=*"]}]': finished
2026-04-15T13:35:29.669 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:29 vm09 bash[34466]: audit 2026-04-15T13:35:28.394844+0000 mon.vm06 (mon.0) 717 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:29.669 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:29 vm09 bash[34466]: audit 2026-04-15T13:35:28.395954+0000 mon.vm06 (mon.0) 718 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
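
"Saving service rgw.foo spec with placement count:4;*" records the spec behind the four rgw daemons being deployed; the same fields are visible in the `orch ls -f json` output (service_type rgw, service_id foo, count 4 over host_pattern '*', rgw_frontend_port 8000). A sketch of re-applying an equivalent spec by hand via `ceph orch apply -i`, with the field values copied from that JSON:

    import subprocess
    import tempfile
    import textwrap

    # Equivalent of the saved rgw.foo spec; field values come from the
    # `orch ls -f json` dump earlier in this log.
    spec = textwrap.dedent("""\
        service_type: rgw
        service_id: foo
        placement:
          count: 4
          host_pattern: '*'
        spec:
          rgw_frontend_port: 8000
    """)
    with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as f:
        f.write(spec)
        path = f.name
    subprocess.check_call(["ceph", "orch", "apply", "-i", path])
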
2026-04-15T13:35:29.669 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:29 vm09 bash[34466]: cephadm 2026-04-15T13:35:28.396512+0000 mgr.vm06.qbbldl (mgr.14229) 122 : cephadm [INF] Deploying daemon rgw.foo.vm06.landug on vm06
2026-04-15T13:35:29.669 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:29 vm09 bash[34466]: audit 2026-04-15T13:35:28.410632+0000 mon.vm06 (mon.0) 719 : audit [INF] from='client.? ' entity='client.rgw.foo.vm09.iwshxg' cmd='[{"prefix": "osd pool application enable","pool": ".rgw.root","app": "rgw"}]': finished
2026-04-15T13:35:29.669 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:29 vm09 bash[34466]: cluster 2026-04-15T13:35:28.412351+0000 mon.vm06 (mon.0) 720 : cluster [DBG] osdmap e26: 8 total, 8 up, 8 in
2026-04-15T13:35:29.669 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:29 vm09 bash[34466]: audit 2026-04-15T13:35:29.347532+0000 mon.vm06 (mon.0) 721 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:29.669 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:29 vm09 bash[34466]: audit 2026-04-15T13:35:29.352969+0000 mon.vm06 (mon.0) 722 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:29.669 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:29 vm09 bash[34466]: audit 2026-04-15T13:35:29.356184+0000 mon.vm06 (mon.0) 723 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:29.669 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:29 vm09 bash[34466]: cephadm 2026-04-15T13:35:29.356596+0000 mgr.vm06.qbbldl (mgr.14229) 123 : cephadm [INF] Saving service rgw.foo spec with placement count:4;*
2026-04-15T13:35:29.669 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:29 vm09 bash[34466]: audit 2026-04-15T13:35:29.359162+0000 mon.vm06 (mon.0) 724 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:29.669 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:29 vm09 bash[34466]: audit 2026-04-15T13:35:29.362137+0000 mon.vm06 (mon.0) 725 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:29.669 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:29 vm09 bash[34466]: audit 2026-04-15T13:35:29.365732+0000 mon.vm06 (mon.0) 726 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:29.908 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:35:29.909 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:22.852862Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:22.243225Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:22.242981Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:22.852792Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:18.418610Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:22.243081Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:22.243273Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:22.243030Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:22.242842Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T13:33:17.901263Z", "last_refresh": "2026-04-15T13:35:22.852461Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:29.362346Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T13:35:29.356602Z", "ports": [8000, 8001], "running": 0, "size": 4}}]
2026-04-15T13:35:29.978 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-15T13:35:30.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:30 vm06 bash[28114]: cephadm 2026-04-15T13:35:29.371267+0000 mgr.vm06.qbbldl (mgr.14229) 124 : cephadm [INF] Deploying daemon haproxy.rgw.foo.vm09.xswxmk on vm09
2026-04-15T13:35:30.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:30 vm06 bash[28114]: cluster 2026-04-15T13:35:29.415729+0000 mon.vm06 (mon.0) 727 : cluster [DBG] osdmap e27: 8 total, 8 up, 8 in
2026-04-15T13:35:30.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:30 vm06 bash[28114]: audit 2026-04-15T13:35:29.417620+0000 mon.vm06 (mon.0) 728 : audit [INF] from='client.? 192.168.123.106:0/3242464620' entity='client.rgw.foo.vm06.liyzhd' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch
192.168.123.106:0/3242464620' entity='client.rgw.foo.vm06.liyzhd' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch
2026-04-15T13:35:30.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:30 vm06 bash[28114]: audit 2026-04-15T13:35:29.417747+0000 mon.vm06 (mon.0) 729 : audit [INF] from='client.? 192.168.123.109:0/3043381567' entity='client.rgw.foo.vm09.iwshxg' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch
2026-04-15T13:35:30.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:30 vm06 bash[28114]: audit 2026-04-15T13:35:29.425585+0000 mon.vm09 (mon.1) 30 : audit [INF] from='client.? 192.168.123.109:0/414146888' entity='client.rgw.foo.vm09.pxnsqu' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch
2026-04-15T13:35:30.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:30 vm06 bash[28114]: audit 2026-04-15T13:35:29.427743+0000 mon.vm06 (mon.0) 730 : audit [INF] from='client.? ' entity='client.rgw.foo.vm09.pxnsqu' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch
2026-04-15T13:35:30.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:30 vm06 bash[28114]: audit 2026-04-15T13:35:29.894832+0000 mon.vm06 (mon.0) 731 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:30.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:30 vm06 bash[28114]: audit 2026-04-15T13:35:29.904481+0000 mgr.vm06.qbbldl (mgr.14229) 125 : audit [DBG] from='client.14652 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:30.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:30 vm06 bash[28114]: audit 2026-04-15T13:35:29.905326+0000 mon.vm06 (mon.0) 732 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:30.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:30 vm06 bash[28114]: audit 2026-04-15T13:35:29.906085+0000 mon.vm06 (mon.0) 733 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:30.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:30 vm06 bash[28114]: audit 2026-04-15T13:35:29.906612+0000 mon.vm06 (mon.0) 734 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:30.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:30 vm06 bash[28114]: audit 2026-04-15T13:35:29.907118+0000 mon.vm06 (mon.0) 735 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:30.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:30 vm06 bash[28114]: cluster 2026-04-15T13:35:30.174153+0000 mgr.vm06.qbbldl (mgr.14229) 126 : cluster [DBG] pgmap v56: 65 pgs: 19 active+clean, 19 creating+peering, 27 unknown; 578 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 6.8 KiB/s rd, 1.2 KiB/s wr, 7 op/s
2026-04-15T13:35:30.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:30 vm09 bash[34466]: cephadm 2026-04-15T13:35:29.371267+0000 mgr.vm06.qbbldl (mgr.14229) 124 : cephadm [INF] Deploying daemon haproxy.rgw.foo.vm09.xswxmk on vm09
2026-04-15T13:35:30.861 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:30 vm09 bash[34466]: cluster 2026-04-15T13:35:29.415729+0000 mon.vm06 (mon.0) 727 : cluster [DBG] osdmap e27: 8 total, 8 up, 8 in
2026-04-15T13:35:30.861 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:30 vm09 bash[34466]: audit 2026-04-15T13:35:29.417620+0000 mon.vm06 (mon.0) 728 : audit [INF] from='client.? 192.168.123.106:0/3242464620' entity='client.rgw.foo.vm06.liyzhd' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch
2026-04-15T13:35:30.861 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:30 vm09 bash[34466]: audit 2026-04-15T13:35:29.417747+0000 mon.vm06 (mon.0) 729 : audit [INF] from='client.? 192.168.123.109:0/3043381567' entity='client.rgw.foo.vm09.iwshxg' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch
2026-04-15T13:35:30.861 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:30 vm09 bash[34466]: audit 2026-04-15T13:35:29.425585+0000 mon.vm09 (mon.1) 30 : audit [INF] from='client.? 192.168.123.109:0/414146888' entity='client.rgw.foo.vm09.pxnsqu' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch
2026-04-15T13:35:30.861 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:30 vm09 bash[34466]: audit 2026-04-15T13:35:29.427743+0000 mon.vm06 (mon.0) 730 : audit [INF] from='client.? ' entity='client.rgw.foo.vm09.pxnsqu' cmd={"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"} : dispatch
2026-04-15T13:35:30.861 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:30 vm09 bash[34466]: audit 2026-04-15T13:35:29.894832+0000 mon.vm06 (mon.0) 731 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:30.861 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:30 vm09 bash[34466]: audit 2026-04-15T13:35:29.904481+0000 mgr.vm06.qbbldl (mgr.14229) 125 : audit [DBG] from='client.14652 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:30.861 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:30 vm09 bash[34466]: audit 2026-04-15T13:35:29.905326+0000 mon.vm06 (mon.0) 732 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:30.861 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:30 vm09 bash[34466]: audit 2026-04-15T13:35:29.906085+0000 mon.vm06 (mon.0) 733 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:30.861 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:30 vm09 bash[34466]: audit 2026-04-15T13:35:29.906612+0000 mon.vm06 (mon.0) 734 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:30.861 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:30 vm09 bash[34466]: audit 2026-04-15T13:35:29.907118+0000 mon.vm06 (mon.0) 735 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:30.861 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:30 vm09 bash[34466]: cluster 2026-04-15T13:35:30.174153+0000 mgr.vm06.qbbldl (mgr.14229) 126 : cluster [DBG] pgmap v56: 65 pgs: 19 active+clean, 19 creating+peering, 27 unknown; 578 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 6.8 KiB/s rd, 1.2 KiB/s wr, 7 op/s
2026-04-15T13:35:30.979 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json
2026-04-15T13:35:31.246 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:31.662 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:35:31.662 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:22.852862Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:22.243225Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:22.242981Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:22.852792Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:18.418610Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:22.243081Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:22.243273Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:22.243030Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:22.242842Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T13:33:17.901263Z", "last_refresh": "2026-04-15T13:35:22.852461Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:29.362346Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T13:35:29.356602Z", "ports": [8000, 8001], "running": 0, "size": 4}}]
2026-04-15T13:35:31.675 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:31 vm06 bash[28114]: audit 2026-04-15T13:35:30.417190+0000 mon.vm06 (mon.0) 736 : audit [INF] from='client.? 192.168.123.106:0/3242464620' entity='client.rgw.foo.vm06.liyzhd' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished
2026-04-15T13:35:31.675 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:31 vm06 bash[28114]: audit 2026-04-15T13:35:30.417223+0000 mon.vm06 (mon.0) 737 : audit [INF] from='client.? 192.168.123.109:0/3043381567' entity='client.rgw.foo.vm09.iwshxg' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished
2026-04-15T13:35:31.675 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:31 vm06 bash[28114]: audit 2026-04-15T13:35:30.417253+0000 mon.vm06 (mon.0) 738 : audit [INF] from='client.? ' entity='client.rgw.foo.vm09.pxnsqu' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished
2026-04-15T13:35:31.675 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:31 vm06 bash[28114]: cluster 2026-04-15T13:35:30.419389+0000 mon.vm06 (mon.0) 739 : cluster [DBG] osdmap e28: 8 total, 8 up, 8 in
2026-04-15T13:35:31.745 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-15T13:35:31.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:31 vm09 bash[34466]: audit 2026-04-15T13:35:30.417190+0000 mon.vm06 (mon.0) 736 : audit [INF] from='client.? 192.168.123.106:0/3242464620' entity='client.rgw.foo.vm06.liyzhd' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished
2026-04-15T13:35:31.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:31 vm09 bash[34466]: audit 2026-04-15T13:35:30.417223+0000 mon.vm06 (mon.0) 737 : audit [INF] from='client.? 192.168.123.109:0/3043381567' entity='client.rgw.foo.vm09.iwshxg' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished
2026-04-15T13:35:31.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:31 vm09 bash[34466]: audit 2026-04-15T13:35:30.417253+0000 mon.vm06 (mon.0) 738 : audit [INF] from='client.? ' entity='client.rgw.foo.vm09.pxnsqu' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.log","app": "rgw"}]': finished
2026-04-15T13:35:31.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:31 vm09 bash[34466]: cluster 2026-04-15T13:35:30.419389+0000 mon.vm06 (mon.0) 739 : cluster [DBG] osdmap e28: 8 total, 8 up, 8 in
2026-04-15T13:35:32.745 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json
2026-04-15T13:35:32.754 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:32 vm06 bash[28114]: cluster 2026-04-15T13:35:31.440077+0000 mon.vm06 (mon.0) 740 : cluster [DBG] osdmap e29: 8 total, 8 up, 8 in
2026-04-15T13:35:32.754 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:32 vm06 bash[28114]: audit 2026-04-15T13:35:31.441781+0000 mon.vm06 (mon.0) 741 : audit [INF] from='client.? 192.168.123.106:0/3242464620' entity='client.rgw.foo.vm06.liyzhd' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-15T13:35:32.754 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:32 vm06 bash[28114]: audit 2026-04-15T13:35:31.442686+0000 mon.vm06 (mon.0) 742 : audit [INF] from='client.? 192.168.123.109:0/3043381567' entity='client.rgw.foo.vm09.iwshxg' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-15T13:35:32.754 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:32 vm06 bash[28114]: audit 2026-04-15T13:35:31.442761+0000 mon.vm06 (mon.0) 743 : audit [INF] from='client.? 192.168.123.106:0/3713981111' entity='client.rgw.foo.vm06.landug' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-15T13:35:32.755 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:32 vm06 bash[28114]: audit 2026-04-15T13:35:31.456535+0000 mon.vm09 (mon.1) 31 : audit [INF] from='client.? 192.168.123.109:0/414146888' entity='client.rgw.foo.vm09.pxnsqu' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-15T13:35:32.755 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:32 vm06 bash[28114]: audit 2026-04-15T13:35:31.459022+0000 mon.vm06 (mon.0) 744 : audit [INF] from='client.? ' entity='client.rgw.foo.vm09.pxnsqu' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-15T13:35:32.755 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:32 vm06 bash[28114]: audit 2026-04-15T13:35:31.658805+0000 mgr.vm06.qbbldl (mgr.14229) 127 : audit [DBG] from='client.14656 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:32.755 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:32 vm06 bash[28114]: audit 2026-04-15T13:35:31.659535+0000 mon.vm06 (mon.0) 745 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:32.755 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:32 vm06 bash[28114]: audit 2026-04-15T13:35:31.660191+0000 mon.vm06 (mon.0) 746 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:32.755 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:32 vm06 bash[28114]: audit 2026-04-15T13:35:31.660632+0000 mon.vm06 (mon.0) 747 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:32.755 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:32 vm06 bash[28114]: audit 2026-04-15T13:35:31.661095+0000 mon.vm06 (mon.0) 748 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:32.755 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:32 vm06 bash[28114]: cluster 2026-04-15T13:35:32.174584+0000 mgr.vm06.qbbldl (mgr.14229) 128 : cluster [DBG] pgmap v59: 97 pgs: 31 active+clean, 26 creating+peering, 40 unknown; 578 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 10 KiB/s rd, 1.7 KiB/s wr, 10 op/s
2026-04-15T13:35:32.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:32 vm09 bash[34466]: cluster 2026-04-15T13:35:31.440077+0000 mon.vm06 (mon.0) 740 : cluster [DBG] osdmap e29: 8 total, 8 up, 8 in
2026-04-15T13:35:32.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:32 vm09 bash[34466]: audit 2026-04-15T13:35:31.441781+0000 mon.vm06 (mon.0) 741 : audit [INF] from='client.? 192.168.123.106:0/3242464620' entity='client.rgw.foo.vm06.liyzhd' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-15T13:35:32.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:32 vm09 bash[34466]: audit 2026-04-15T13:35:31.442686+0000 mon.vm06 (mon.0) 742 : audit [INF] from='client.? 192.168.123.109:0/3043381567' entity='client.rgw.foo.vm09.iwshxg' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-15T13:35:32.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:32 vm09 bash[34466]: audit 2026-04-15T13:35:31.442761+0000 mon.vm06 (mon.0) 743 : audit [INF] from='client.? 192.168.123.106:0/3713981111' entity='client.rgw.foo.vm06.landug' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-15T13:35:32.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:32 vm09 bash[34466]: audit 2026-04-15T13:35:31.456535+0000 mon.vm09 (mon.1) 31 : audit [INF] from='client.? 192.168.123.109:0/414146888' entity='client.rgw.foo.vm09.pxnsqu' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-15T13:35:32.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:32 vm09 bash[34466]: audit 2026-04-15T13:35:31.459022+0000 mon.vm06 (mon.0) 744 : audit [INF] from='client.? ' entity='client.rgw.foo.vm09.pxnsqu' cmd={"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"} : dispatch
2026-04-15T13:35:32.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:32 vm09 bash[34466]: audit 2026-04-15T13:35:31.658805+0000 mgr.vm06.qbbldl (mgr.14229) 127 : audit [DBG] from='client.14656 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:32.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:32 vm09 bash[34466]: audit 2026-04-15T13:35:31.659535+0000 mon.vm06 (mon.0) 745 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:32.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:32 vm09 bash[34466]: audit 2026-04-15T13:35:31.660191+0000 mon.vm06 (mon.0) 746 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:32.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:32 vm09 bash[34466]: audit 2026-04-15T13:35:31.660632+0000 mon.vm06 (mon.0) 747 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:32.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:32 vm09 bash[34466]: audit 2026-04-15T13:35:31.661095+0000 mon.vm06 (mon.0) 748 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:32.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:32 vm09 bash[34466]: cluster 2026-04-15T13:35:32.174584+0000 mgr.vm06.qbbldl (mgr.14229) 128 : cluster [DBG] pgmap v59: 97 pgs: 31 active+clean, 26 creating+peering, 40 unknown; 578 KiB data, 214 MiB used, 160 GiB / 160 GiB avail; 10 KiB/s rd, 1.7 KiB/s wr, 10 op/s
2026-04-15T13:35:33.038 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:33.148 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:33 vm09 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:35:33.419 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:33 vm09 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
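
The two systemd warnings are expected noise on cephadm-managed hosts: the generated ceph-<fsid>@.service unit template deliberately sets KillMode=none so that systemd leaves the lifecycle of the containerized daemons to the container runtime rather than killing them itself. One way to confirm where the setting comes from on a node, roughly:

    # inspect the generated unit template on the host (sketch)
    systemctl cat 'ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service' | grep -n KillMode
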
2026-04-15T13:35:33.448 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:35:33.448 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:22.852862Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:22.243225Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:22.242981Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:22.852792Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:33.426091Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:22.243081Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:22.243273Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:22.243030Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:22.242842Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T13:33:17.901263Z", "last_refresh": "2026-04-15T13:35:22.852461Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:29.362346Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T13:35:29.356602Z", "ports": [8000, 8001], "running": 0, "size": 4}}]
2026-04-15T13:35:33.475 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:33 vm06 bash[28114]: audit 2026-04-15T13:35:32.448399+0000 mon.vm06 (mon.0) 749 : audit [INF] from='client.? 192.168.123.106:0/3242464620' entity='client.rgw.foo.vm06.liyzhd' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
2026-04-15T13:35:33.475 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:33 vm06 bash[28114]: audit 2026-04-15T13:35:32.448467+0000 mon.vm06 (mon.0) 750 : audit [INF] from='client.? 192.168.123.109:0/3043381567' entity='client.rgw.foo.vm09.iwshxg' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
2026-04-15T13:35:33.475 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:33 vm06 bash[28114]: audit 2026-04-15T13:35:32.448523+0000 mon.vm06 (mon.0) 751 : audit [INF] from='client.? 192.168.123.106:0/3713981111' entity='client.rgw.foo.vm06.landug' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
2026-04-15T13:35:33.475 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:33 vm06 bash[28114]: audit 2026-04-15T13:35:32.448586+0000 mon.vm06 (mon.0) 752 : audit [INF] from='client.? ' entity='client.rgw.foo.vm09.pxnsqu' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
2026-04-15T13:35:33.475 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:33 vm06 bash[28114]: cluster 2026-04-15T13:35:32.454627+0000 mon.vm06 (mon.0) 753 : cluster [DBG] osdmap e30: 8 total, 8 up, 8 in
2026-04-15T13:35:33.475 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:33 vm06 bash[28114]: audit 2026-04-15T13:35:33.413405+0000 mon.vm06 (mon.0) 754 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:33.475 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:33 vm06 bash[28114]: audit 2026-04-15T13:35:33.419348+0000 mon.vm06 (mon.0) 755 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:33.475 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:33 vm06 bash[28114]: audit 2026-04-15T13:35:33.425889+0000 mon.vm06 (mon.0) 756 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:33.475 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:33 vm06 bash[28114]: audit 2026-04-15T13:35:33.438350+0000 mon.vm06 (mon.0) 757 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:33.475 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:33 vm06 bash[28114]: audit 2026-04-15T13:35:33.445331+0000 mon.vm06 (mon.0) 758 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:33.475 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:33 vm06 bash[28114]: audit 2026-04-15T13:35:33.445854+0000 mon.vm06 (mon.0) 759 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:33.475 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:33 vm06 bash[28114]: audit 2026-04-15T13:35:33.446273+0000 mon.vm06 (mon.0) 760 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:33.547 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-15T13:35:33.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:33 vm09 bash[34466]: audit 2026-04-15T13:35:32.448399+0000 mon.vm06 (mon.0) 749 : audit [INF] from='client.? 192.168.123.106:0/3242464620' entity='client.rgw.foo.vm06.liyzhd' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
2026-04-15T13:35:33.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:33 vm09 bash[34466]: audit 2026-04-15T13:35:32.448467+0000 mon.vm06 (mon.0) 750 : audit [INF] from='client.? 192.168.123.109:0/3043381567' entity='client.rgw.foo.vm09.iwshxg' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
2026-04-15T13:35:33.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:33 vm09 bash[34466]: audit 2026-04-15T13:35:32.448523+0000 mon.vm06 (mon.0) 751 : audit [INF] from='client.? 192.168.123.106:0/3713981111' entity='client.rgw.foo.vm06.landug' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
2026-04-15T13:35:33.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:33 vm09 bash[34466]: audit 2026-04-15T13:35:32.448586+0000 mon.vm06 (mon.0) 752 : audit [INF] from='client.? ' entity='client.rgw.foo.vm09.pxnsqu' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.control","app": "rgw"}]': finished
2026-04-15T13:35:33.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:33 vm09 bash[34466]: cluster 2026-04-15T13:35:32.454627+0000 mon.vm06 (mon.0) 753 : cluster [DBG] osdmap e30: 8 total, 8 up, 8 in
2026-04-15T13:35:33.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:33 vm09 bash[34466]: audit 2026-04-15T13:35:33.413405+0000 mon.vm06 (mon.0) 754 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:33.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:33 vm09 bash[34466]: audit 2026-04-15T13:35:33.419348+0000 mon.vm06 (mon.0) 755 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:33.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:33 vm09 bash[34466]: audit 2026-04-15T13:35:33.425889+0000 mon.vm06 (mon.0) 756 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:33.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:33 vm09 bash[34466]: audit 2026-04-15T13:35:33.438350+0000 mon.vm06 (mon.0) 757 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:33.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:33 vm09 bash[34466]: audit 2026-04-15T13:35:33.445331+0000 mon.vm06 (mon.0) 758 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:33.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:33 vm09 bash[34466]: audit 2026-04-15T13:35:33.445854+0000 mon.vm06 (mon.0) 759 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:33.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:33 vm09 bash[34466]: audit 2026-04-15T13:35:33.446273+0000 mon.vm06 (mon.0) 760 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:34.549 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json
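
This is the third identical orch ls probe in about three seconds: tasks.cephadm simply re-polls until running equals size, bounded by the task's timeout. A hand-rolled equivalent of that wait loop, again assuming jq is available:

    # minimal sketch of the wait loop; teuthology additionally bounds this with a timeout
    until ceph orch ls -f json |
          jq -e '.[] | select(.service_name == "rgw.foo") | .status.running == .status.size' >/dev/null
    do
      sleep 1
    done
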
2026-04-15T13:35:34.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:34 vm06 bash[28114]: cephadm 2026-04-15T13:35:33.427256+0000 mgr.vm06.qbbldl (mgr.14229) 129 : cephadm [INF] Deploying daemon haproxy.rgw.foo.vm06.ndmjsv on vm06
2026-04-15T13:35:34.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:34 vm06 bash[28114]: audit 2026-04-15T13:35:33.437732+0000 mgr.vm06.qbbldl (mgr.14229) 130 : audit [DBG] from='client.14660 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:34.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:34 vm06 bash[28114]: cluster 2026-04-15T13:35:33.457098+0000 mon.vm06 (mon.0) 761 : cluster [DBG] osdmap e31: 8 total, 8 up, 8 in
2026-04-15T13:35:34.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:34 vm06 bash[28114]: audit 2026-04-15T13:35:33.464824+0000 mon.vm06 (mon.0) 762 : audit [INF] from='client.? 192.168.123.109:0/3043381567' entity='client.rgw.foo.vm09.iwshxg' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-15T13:35:34.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:34 vm06 bash[28114]: audit 2026-04-15T13:35:33.465082+0000 mon.vm09 (mon.1) 32 : audit [INF] from='client.? 192.168.123.109:0/414146888' entity='client.rgw.foo.vm09.pxnsqu' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-15T13:35:34.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:34 vm06 bash[28114]: audit 2026-04-15T13:35:33.478217+0000 mon.vm06 (mon.0) 763 : audit [INF] from='client.? 192.168.123.106:0/3242464620' entity='client.rgw.foo.vm06.liyzhd' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-15T13:35:34.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:34 vm06 bash[28114]: audit 2026-04-15T13:35:33.478342+0000 mon.vm06 (mon.0) 764 : audit [INF] from='client.? 192.168.123.106:0/3713981111' entity='client.rgw.foo.vm06.landug' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-15T13:35:34.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:34 vm06 bash[28114]: audit 2026-04-15T13:35:33.478726+0000 mon.vm06 (mon.0) 765 : audit [INF] from='client.? ' entity='client.rgw.foo.vm09.pxnsqu' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-15T13:35:34.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:34 vm06 bash[28114]: cluster 2026-04-15T13:35:34.174971+0000 mgr.vm06.qbbldl (mgr.14229) 131 : cluster [DBG] pgmap v62: 129 pgs: 59 active+clean, 21 creating+peering, 49 unknown; 578 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 2.2 KiB/s rd, 767 B/s wr, 5 op/s
2026-04-15T13:35:34.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:34 vm06 bash[28114]: audit 2026-04-15T13:35:34.457706+0000 mon.vm06 (mon.0) 766 : audit [INF] from='client.? 192.168.123.109:0/3043381567' entity='client.rgw.foo.vm09.iwshxg' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished
2026-04-15T13:35:34.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:34 vm06 bash[28114]: audit 2026-04-15T13:35:34.457788+0000 mon.vm06 (mon.0) 767 : audit [INF] from='client.? 192.168.123.106:0/3242464620' entity='client.rgw.foo.vm06.liyzhd' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished
2026-04-15T13:35:34.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:34 vm06 bash[28114]: audit 2026-04-15T13:35:34.457816+0000 mon.vm06 (mon.0) 768 : audit [INF] from='client.? 192.168.123.106:0/3713981111' entity='client.rgw.foo.vm06.landug' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished
2026-04-15T13:35:34.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:34 vm06 bash[28114]: audit 2026-04-15T13:35:34.457861+0000 mon.vm06 (mon.0) 769 : audit [INF] from='client.? ' entity='client.rgw.foo.vm09.pxnsqu' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished
2026-04-15T13:35:34.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:34 vm06 bash[28114]: cluster 2026-04-15T13:35:34.461162+0000 mon.vm06 (mon.0) 770 : cluster [DBG] osdmap e32: 8 total, 8 up, 8 in
2026-04-15T13:35:34.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:34 vm06 bash[28114]: audit 2026-04-15T13:35:34.462134+0000 mon.vm09 (mon.1) 33 : audit [INF] from='client.? 192.168.123.109:0/414146888' entity='client.rgw.foo.vm09.pxnsqu' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-15T13:35:34.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:34 vm06 bash[28114]: audit 2026-04-15T13:35:34.462271+0000 mon.vm06 (mon.0) 771 : audit [INF] from='client.? 192.168.123.106:0/3242464620' entity='client.rgw.foo.vm06.liyzhd' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-15T13:35:34.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:34 vm06 bash[28114]: audit 2026-04-15T13:35:34.462364+0000 mon.vm06 (mon.0) 772 : audit [INF] from='client.? 192.168.123.109:0/3043381567' entity='client.rgw.foo.vm09.iwshxg' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-15T13:35:34.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:34 vm06 bash[28114]: audit 2026-04-15T13:35:34.462418+0000 mon.vm06 (mon.0) 773 : audit [INF] from='client.? 192.168.123.106:0/3713981111' entity='client.rgw.foo.vm06.landug' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-15T13:35:34.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:34 vm06 bash[28114]: audit 2026-04-15T13:35:34.462418+0000 mon.vm06 (mon.0) 773 : audit [INF] from='client.? 
2026-04-15T13:35:34.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:34 vm06 bash[28114]: audit 2026-04-15T13:35:34.464752+0000 mon.vm06 (mon.0) 774 : audit [INF] from='client.? ' entity='client.rgw.foo.vm09.pxnsqu' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-15T13:35:34.822 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:34.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:34 vm09 bash[34466]: cephadm 2026-04-15T13:35:33.427256+0000 mgr.vm06.qbbldl (mgr.14229) 129 : cephadm [INF] Deploying daemon haproxy.rgw.foo.vm06.ndmjsv on vm06
2026-04-15T13:35:34.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:34 vm09 bash[34466]: audit 2026-04-15T13:35:33.437732+0000 mgr.vm06.qbbldl (mgr.14229) 130 : audit [DBG] from='client.14660 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:34.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:34 vm09 bash[34466]: cluster 2026-04-15T13:35:33.457098+0000 mon.vm06 (mon.0) 761 : cluster [DBG] osdmap e31: 8 total, 8 up, 8 in
2026-04-15T13:35:34.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:34 vm09 bash[34466]: audit 2026-04-15T13:35:33.464824+0000 mon.vm06 (mon.0) 762 : audit [INF] from='client.? 192.168.123.109:0/3043381567' entity='client.rgw.foo.vm09.iwshxg' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-15T13:35:34.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:34 vm09 bash[34466]: audit 2026-04-15T13:35:33.465082+0000 mon.vm09 (mon.1) 32 : audit [INF] from='client.? 192.168.123.109:0/414146888' entity='client.rgw.foo.vm09.pxnsqu' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-15T13:35:34.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:34 vm09 bash[34466]: audit 2026-04-15T13:35:33.478217+0000 mon.vm06 (mon.0) 763 : audit [INF] from='client.? 192.168.123.106:0/3242464620' entity='client.rgw.foo.vm06.liyzhd' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-15T13:35:34.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:34 vm09 bash[34466]: audit 2026-04-15T13:35:33.478342+0000 mon.vm06 (mon.0) 764 : audit [INF] from='client.? 192.168.123.106:0/3713981111' entity='client.rgw.foo.vm06.landug' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-15T13:35:34.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:34 vm09 bash[34466]: audit 2026-04-15T13:35:33.478726+0000 mon.vm06 (mon.0) 765 : audit [INF] from='client.? ' entity='client.rgw.foo.vm09.pxnsqu' cmd={"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"} : dispatch
2026-04-15T13:35:34.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:34 vm09 bash[34466]: cluster 2026-04-15T13:35:34.174971+0000 mgr.vm06.qbbldl (mgr.14229) 131 : cluster [DBG] pgmap v62: 129 pgs: 59 active+clean, 21 creating+peering, 49 unknown; 578 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 2.2 KiB/s rd, 767 B/s wr, 5 op/s
2026-04-15T13:35:34.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:34 vm09 bash[34466]: audit 2026-04-15T13:35:34.457706+0000 mon.vm06 (mon.0) 766 : audit [INF] from='client.? 192.168.123.109:0/3043381567' entity='client.rgw.foo.vm09.iwshxg' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished
2026-04-15T13:35:34.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:34 vm09 bash[34466]: audit 2026-04-15T13:35:34.457788+0000 mon.vm06 (mon.0) 767 : audit [INF] from='client.? 192.168.123.106:0/3242464620' entity='client.rgw.foo.vm06.liyzhd' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished
2026-04-15T13:35:34.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:34 vm09 bash[34466]: audit 2026-04-15T13:35:34.457816+0000 mon.vm06 (mon.0) 768 : audit [INF] from='client.? 192.168.123.106:0/3713981111' entity='client.rgw.foo.vm06.landug' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished
2026-04-15T13:35:34.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:34 vm09 bash[34466]: audit 2026-04-15T13:35:34.457861+0000 mon.vm06 (mon.0) 769 : audit [INF] from='client.? ' entity='client.rgw.foo.vm09.pxnsqu' cmd='[{"prefix": "osd pool application enable","pool": "default.rgw.meta","app": "rgw"}]': finished
2026-04-15T13:35:34.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:34 vm09 bash[34466]: cluster 2026-04-15T13:35:34.461162+0000 mon.vm06 (mon.0) 770 : cluster [DBG] osdmap e32: 8 total, 8 up, 8 in
2026-04-15T13:35:34.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:34 vm09 bash[34466]: audit 2026-04-15T13:35:34.462134+0000 mon.vm09 (mon.1) 33 : audit [INF] from='client.? 192.168.123.109:0/414146888' entity='client.rgw.foo.vm09.pxnsqu' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-15T13:35:34.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:34 vm09 bash[34466]: audit 2026-04-15T13:35:34.462271+0000 mon.vm06 (mon.0) 771 : audit [INF] from='client.? 192.168.123.106:0/3242464620' entity='client.rgw.foo.vm06.liyzhd' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-15T13:35:34.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:34 vm09 bash[34466]: audit 2026-04-15T13:35:34.462364+0000 mon.vm06 (mon.0) 772 : audit [INF] from='client.? 192.168.123.109:0/3043381567' entity='client.rgw.foo.vm09.iwshxg' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-15T13:35:34.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:34 vm09 bash[34466]: audit 2026-04-15T13:35:34.462418+0000 mon.vm06 (mon.0) 773 : audit [INF] from='client.? 192.168.123.106:0/3713981111' entity='client.rgw.foo.vm06.landug' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
2026-04-15T13:35:34.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:34 vm09 bash[34466]: audit 2026-04-15T13:35:34.464752+0000 mon.vm06 (mon.0) 774 : audit [INF] from='client.? ' entity='client.rgw.foo.vm09.pxnsqu' cmd={"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"} : dispatch
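The burst of near-identical pool commands above comes from the four rgw.foo daemons starting in parallel: each radosgw sets up default.rgw.meta on first start, tagging it with the rgw application and biasing its PG autoscaling, so the monitor logs one dispatch/finished pair per daemon. For reference, a minimal sketch of how such mon commands can be issued from Python via the librados binding; the payloads are copied from the audit records, but the connection handling is illustrative and not the RGW code path:

    import json
    import rados

    # Assumes a reachable cluster and the usual admin config/keyring paths.
    with rados.Rados(conffile='/etc/ceph/ceph.conf') as cluster:
        for payload in (
            # matches cmd={"prefix": "osd pool application enable", ...} above
            {"prefix": "osd pool application enable",
             "pool": "default.rgw.meta", "app": "rgw"},
            # matches cmd={"prefix": "osd pool set", "var": "pg_autoscale_bias", ...}
            {"prefix": "osd pool set", "pool": "default.rgw.meta",
             "var": "pg_autoscale_bias", "val": "4"},
        ):
            ret, outbuf, outs = cluster.mon_command(json.dumps(payload), b'')
            if ret != 0:
                raise RuntimeError('mon command failed: %s' % outs)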
2026-04-15T13:35:35.353 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:35:35.353 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:22.852862Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:22.243225Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:22.242981Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:22.852792Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:33.426091Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:22.243081Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:22.243273Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:22.243030Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:22.242842Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T13:33:17.901263Z", "last_refresh": "2026-04-15T13:35:22.852461Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:29.362346Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T13:35:29.356602Z", "ports": [8000, 8001], "running": 0, "size": 4}}]
2026-04-15T13:35:35.445 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-15T13:35:35.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:35 vm06 bash[28114]: audit 2026-04-15T13:35:35.349274+0000 mgr.vm06.qbbldl (mgr.14229) 132 : audit [DBG] from='client.14664 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:35.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:35 vm06 bash[28114]: audit 2026-04-15T13:35:35.350059+0000 mon.vm06 (mon.0) 775 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:35.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:35 vm06 bash[28114]: audit 2026-04-15T13:35:35.350785+0000 mon.vm06 (mon.0) 776 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:35.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:35 vm06 bash[28114]: audit 2026-04-15T13:35:35.351311+0000 mon.vm06 (mon.0) 777 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:35.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:35 vm06 bash[28114]: audit 2026-04-15T13:35:35.351800+0000 mon.vm06 (mon.0) 778 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:35.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:35 vm06 bash[28114]: audit 2026-04-15T13:35:35.461123+0000 mon.vm06 (mon.0) 779 : audit [INF] from='client.? 192.168.123.106:0/3242464620' entity='client.rgw.foo.vm06.liyzhd' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished
2026-04-15T13:35:35.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:35 vm06 bash[28114]: audit 2026-04-15T13:35:35.461255+0000 mon.vm06 (mon.0) 780 : audit [INF] from='client.? 192.168.123.109:0/3043381567' entity='client.rgw.foo.vm09.iwshxg' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished
2026-04-15T13:35:35.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:35 vm06 bash[28114]: audit 2026-04-15T13:35:35.461317+0000 mon.vm06 (mon.0) 781 : audit [INF] from='client.? 192.168.123.106:0/3713981111' entity='client.rgw.foo.vm06.landug' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished
2026-04-15T13:35:35.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:35 vm06 bash[28114]: audit 2026-04-15T13:35:35.461370+0000 mon.vm06 (mon.0) 782 : audit [INF] from='client.? ' entity='client.rgw.foo.vm09.pxnsqu' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished
2026-04-15T13:35:35.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:35 vm06 bash[28114]: cluster 2026-04-15T13:35:35.464230+0000 mon.vm06 (mon.0) 783 : cluster [DBG] osdmap e33: 8 total, 8 up, 8 in
2026-04-15T13:35:35.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:35 vm09 bash[34466]: audit 2026-04-15T13:35:35.349274+0000 mgr.vm06.qbbldl (mgr.14229) 132 : audit [DBG] from='client.14664 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:35.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:35 vm09 bash[34466]: audit 2026-04-15T13:35:35.350059+0000 mon.vm06 (mon.0) 775 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:35.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:35 vm09 bash[34466]: audit 2026-04-15T13:35:35.350785+0000 mon.vm06 (mon.0) 776 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:35.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:35 vm09 bash[34466]: audit 2026-04-15T13:35:35.351311+0000 mon.vm06 (mon.0) 777 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:35.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:35 vm09 bash[34466]: audit 2026-04-15T13:35:35.351800+0000 mon.vm06 (mon.0) 778 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:35.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:35 vm09 bash[34466]: audit 2026-04-15T13:35:35.461123+0000 mon.vm06 (mon.0) 779 : audit [INF] from='client.? 192.168.123.106:0/3242464620' entity='client.rgw.foo.vm06.liyzhd' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished
2026-04-15T13:35:35.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:35 vm09 bash[34466]: audit 2026-04-15T13:35:35.461255+0000 mon.vm06 (mon.0) 780 : audit [INF] from='client.? 192.168.123.109:0/3043381567' entity='client.rgw.foo.vm09.iwshxg' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished
2026-04-15T13:35:35.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:35 vm09 bash[34466]: audit 2026-04-15T13:35:35.461317+0000 mon.vm06 (mon.0) 781 : audit [INF] from='client.? 192.168.123.106:0/3713981111' entity='client.rgw.foo.vm06.landug' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished
2026-04-15T13:35:35.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:35 vm09 bash[34466]: audit 2026-04-15T13:35:35.461370+0000 mon.vm06 (mon.0) 782 : audit [INF] from='client.? ' entity='client.rgw.foo.vm09.pxnsqu' cmd='[{"prefix": "osd pool set", "pool": "default.rgw.meta", "var": "pg_autoscale_bias", "val": "4"}]': finished
2026-04-15T13:35:35.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:35 vm09 bash[34466]: cluster 2026-04-15T13:35:35.464230+0000 mon.vm06 (mon.0) 783 : cluster [DBG] osdmap e33: 8 total, 8 up, 8 in
2026-04-15T13:35:36.446 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json
2026-04-15T13:35:36.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:36 vm06 bash[28114]: cluster 2026-04-15T13:35:36.175377+0000 mgr.vm06.qbbldl (mgr.14229) 133 : cluster [DBG] pgmap v65: 129 pgs: 106 active+clean, 9 creating+peering, 14 unknown; 578 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 4.5 KiB/s rd, 1023 B/s wr, 8 op/s
2026-04-15T13:35:36.785 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:36.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:36 vm09 bash[34466]: cluster 2026-04-15T13:35:36.175377+0000 mgr.vm06.qbbldl (mgr.14229) 133 : cluster [DBG] pgmap v65: 129 pgs: 106 active+clean, 9 creating+peering, 14 unknown; 578 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 4.5 KiB/s rd, 1023 B/s wr, 8 op/s
2026-04-15T13:35:37.391 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:37 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:35:37.407 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:35:37.407 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:22.852862Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:22.243225Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:22.242981Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:22.852792Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:37.714283Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:22.243081Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:22.243273Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:22.243030Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:22.242842Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T13:33:17.901263Z", "last_refresh": "2026-04-15T13:35:22.852461Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:29.362346Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T13:35:29.356602Z", "ports": [8000, 8001], "running": 0, "size": 4}}]
2026-04-15T13:35:37.696 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:37 vm06 bash[28114]: audit 2026-04-15T13:35:37.402968+0000 mon.vm06 (mon.0) 784 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:37.697 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:37 vm06 bash[28114]: audit 2026-04-15T13:35:37.404045+0000 mon.vm06 (mon.0) 785 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:37.697 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:37 vm06 bash[28114]: audit 2026-04-15T13:35:37.404891+0000 mon.vm06 (mon.0) 786 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:37.697 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:37 vm06 bash[28114]: audit 2026-04-15T13:35:37.405571+0000 mon.vm06 (mon.0) 787 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:37.697 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:37 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:35:37.725 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-15T13:35:37.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:37 vm09 bash[34466]: audit 2026-04-15T13:35:37.402968+0000 mon.vm06 (mon.0) 784 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:37.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:37 vm09 bash[34466]: audit 2026-04-15T13:35:37.404045+0000 mon.vm06 (mon.0) 785 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:37.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:37 vm09 bash[34466]: audit 2026-04-15T13:35:37.404891+0000 mon.vm06 (mon.0) 786 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:37.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:37 vm09 bash[34466]: audit 2026-04-15T13:35:37.405571+0000 mon.vm06 (mon.0) 787 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:38.726 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json
2026-04-15T13:35:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:38 vm09 bash[34466]: audit 2026-04-15T13:35:37.402201+0000 mgr.vm06.qbbldl (mgr.14229) 134 : audit [DBG] from='client.14668 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
"orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:38 vm09 bash[34466]: audit 2026-04-15T13:35:37.402201+0000 mgr.vm06.qbbldl (mgr.14229) 134 : audit [DBG] from='client.14668 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:38 vm09 bash[34466]: audit 2026-04-15T13:35:37.687005+0000 mon.vm06 (mon.0) 788 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:38 vm09 bash[34466]: audit 2026-04-15T13:35:37.687005+0000 mon.vm06 (mon.0) 788 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:38 vm09 bash[34466]: audit 2026-04-15T13:35:37.697261+0000 mon.vm06 (mon.0) 789 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:38 vm09 bash[34466]: audit 2026-04-15T13:35:37.697261+0000 mon.vm06 (mon.0) 789 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:38 vm09 bash[34466]: audit 2026-04-15T13:35:37.705987+0000 mon.vm06 (mon.0) 790 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:38 vm09 bash[34466]: audit 2026-04-15T13:35:37.705987+0000 mon.vm06 (mon.0) 790 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:38 vm09 bash[34466]: audit 2026-04-15T13:35:37.719132+0000 mon.vm06 (mon.0) 791 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:38 vm09 bash[34466]: audit 2026-04-15T13:35:37.719132+0000 mon.vm06 (mon.0) 791 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:38 vm09 bash[34466]: cephadm 2026-04-15T13:35:37.719541+0000 mgr.vm06.qbbldl (mgr.14229) 135 : cephadm [INF] 12.12.1.106 is in 12.12.0.0/22 on vm09 interface ens3 2026-04-15T13:35:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:38 vm09 bash[34466]: cephadm 2026-04-15T13:35:37.719541+0000 mgr.vm06.qbbldl (mgr.14229) 135 : cephadm [INF] 12.12.1.106 is in 12.12.0.0/22 on vm09 interface ens3 2026-04-15T13:35:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:38 vm09 bash[34466]: cephadm 2026-04-15T13:35:37.719583+0000 mgr.vm06.qbbldl (mgr.14229) 136 : cephadm [INF] 12.12.1.106 is in 12.12.0.0/22 on vm06 interface ens3 2026-04-15T13:35:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:38 vm09 bash[34466]: cephadm 2026-04-15T13:35:37.719583+0000 mgr.vm06.qbbldl (mgr.14229) 136 : cephadm [INF] 12.12.1.106 is in 12.12.0.0/22 on vm06 interface ens3 2026-04-15T13:35:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:38 vm09 bash[34466]: cephadm 2026-04-15T13:35:37.722806+0000 mgr.vm06.qbbldl (mgr.14229) 137 : cephadm [INF] Deploying daemon keepalived.rgw.foo.vm09.cfsofe on vm09 
2026-04-15T13:35:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:38 vm09 bash[34466]: cluster 2026-04-15T13:35:38.175862+0000 mgr.vm06.qbbldl (mgr.14229) 138 : cluster [DBG] pgmap v66: 129 pgs: 127 active+clean, 2 creating+peering; 583 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 233 KiB/s rd, 6.8 KiB/s wr, 429 op/s
2026-04-15T13:35:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:38 vm09 bash[34466]: audit 2026-04-15T13:35:38.481453+0000 mon.vm06 (mon.0) 792 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:35:39.005 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:39.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:38 vm06 bash[28114]: audit 2026-04-15T13:35:37.402201+0000 mgr.vm06.qbbldl (mgr.14229) 134 : audit [DBG] from='client.14668 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:39.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:38 vm06 bash[28114]: audit 2026-04-15T13:35:37.687005+0000 mon.vm06 (mon.0) 788 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:39.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:38 vm06 bash[28114]: audit 2026-04-15T13:35:37.697261+0000 mon.vm06 (mon.0) 789 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:39.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:38 vm06 bash[28114]: audit 2026-04-15T13:35:37.705987+0000 mon.vm06 (mon.0) 790 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:39.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:38 vm06 bash[28114]: audit 2026-04-15T13:35:37.719132+0000 mon.vm06 (mon.0) 791 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:39.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:38 vm06 bash[28114]: cephadm 2026-04-15T13:35:37.719541+0000 mgr.vm06.qbbldl (mgr.14229) 135 : cephadm [INF] 12.12.1.106 is in 12.12.0.0/22 on vm09 interface ens3
2026-04-15T13:35:39.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:38 vm06 bash[28114]: cephadm 2026-04-15T13:35:37.719583+0000 mgr.vm06.qbbldl (mgr.14229) 136 : cephadm [INF] 12.12.1.106 is in 12.12.0.0/22 on vm06 interface ens3
2026-04-15T13:35:39.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:38 vm06 bash[28114]: cephadm 2026-04-15T13:35:37.722806+0000 mgr.vm06.qbbldl (mgr.14229) 137 : cephadm [INF] Deploying daemon keepalived.rgw.foo.vm09.cfsofe on vm09
2026-04-15T13:35:39.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:38 vm06 bash[28114]: cluster 2026-04-15T13:35:38.175862+0000 mgr.vm06.qbbldl (mgr.14229) 138 : cluster [DBG] pgmap v66: 129 pgs: 127 active+clean, 2 creating+peering; 583 KiB data, 215 MiB used, 160 GiB / 160 GiB avail; 233 KiB/s rd, 6.8 KiB/s wr, 429 op/s
2026-04-15T13:35:39.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:38 vm06 bash[28114]: audit 2026-04-15T13:35:38.481453+0000 mon.vm06 (mon.0) 792 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:35:39.384 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:35:39.384 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:22.852862Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:22.243225Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:22.242981Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:22.852792Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:37.714283Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:22.243081Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:22.243273Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:22.243030Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:22.242842Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T13:33:17.901263Z", "last_refresh": "2026-04-15T13:35:22.852461Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:29.362346Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T13:35:29.356602Z", "ports": [8000, 8001], "running": 0, "size": 4}}]
2026-04-15T13:35:39.479 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-15T13:35:39.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:39 vm06 bash[28114]: audit 2026-04-15T13:35:39.381714+0000 mon.vm06 (mon.0) 793 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:39.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:39 vm06 bash[28114]: audit 2026-04-15T13:35:39.382372+0000 mon.vm06 (mon.0) 794 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:39.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:39 vm06 bash[28114]: audit 2026-04-15T13:35:39.382832+0000 mon.vm06 (mon.0) 795 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:39.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:39 vm06 bash[28114]: audit 2026-04-15T13:35:39.383258+0000 mon.vm06 (mon.0) 796 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:39 vm09 bash[34466]: audit 2026-04-15T13:35:39.381714+0000 mon.vm06 (mon.0) 793 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:39 vm09 bash[34466]: audit 2026-04-15T13:35:39.382372+0000 mon.vm06 (mon.0) 794 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:39 vm09 bash[34466]: audit 2026-04-15T13:35:39.382832+0000 mon.vm06 (mon.0) 795 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:39 vm09 bash[34466]: audit 2026-04-15T13:35:39.383258+0000 mon.vm06 (mon.0) 796 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:40.480 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json
2026-04-15T13:35:40.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:40 vm06 bash[28114]: audit 2026-04-15T13:35:39.380988+0000 mgr.vm06.qbbldl (mgr.14229) 139 : audit [DBG] from='client.14672 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:40.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:40 vm06 bash[28114]: cluster 2026-04-15T13:35:40.176308+0000 mgr.vm06.qbbldl (mgr.14229) 140 : cluster [DBG] pgmap v67: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 382 KiB/s rd, 9.7 KiB/s wr, 705 op/s
2026-04-15T13:35:40.794 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:41.072 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:40 vm09 bash[34466]: audit 2026-04-15T13:35:39.380988+0000 mgr.vm06.qbbldl (mgr.14229) 139 : audit [DBG] from='client.14672 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:41.072 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:40 vm09 bash[34466]: cluster 2026-04-15T13:35:40.176308+0000 mgr.vm06.qbbldl (mgr.14229) 140 : cluster [DBG] pgmap v67: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 382 KiB/s rd, 9.7 KiB/s wr, 705 op/s
2026-04-15T13:35:41.196 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:35:41.196 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:22.852862Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:22.243225Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:22.242981Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:22.852792Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:37.714283Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:22.243081Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:22.243273Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:22.243030Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:22.242842Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T13:33:17.901263Z", "last_refresh": "2026-04-15T13:35:22.852461Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:29.362346Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T13:35:29.356602Z", "ports": [8000, 8001], "running": 0, "size": 4}}]
2026-04-15T13:35:41.263 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-15T13:35:42.242 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:41 vm09 bash[34466]: audit 2026-04-15T13:35:41.191904+0000 mgr.vm06.qbbldl (mgr.14229) 141 : audit [DBG] from='client.14676 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:42.242 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:41 vm09 bash[34466]: audit 2026-04-15T13:35:41.192771+0000 mon.vm06 (mon.0) 797 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:42.242 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:41 vm09 bash[34466]: audit 2026-04-15T13:35:41.193674+0000 mon.vm06 (mon.0) 798 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:42.242 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:41 vm09 bash[34466]: audit 2026-04-15T13:35:41.194340+0000 mon.vm06 (mon.0) 799 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:42.242 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:41 vm09 bash[34466]: audit 2026-04-15T13:35:41.195008+0000 mon.vm06 (mon.0) 800 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:42.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:41 vm06 bash[28114]: audit 2026-04-15T13:35:41.191904+0000 mgr.vm06.qbbldl (mgr.14229) 141 : audit [DBG] from='client.14676 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:42.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:41 vm06 bash[28114]: audit 2026-04-15T13:35:41.192771+0000 mon.vm06 (mon.0) 797 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:42.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:41 vm06 bash[28114]: audit 2026-04-15T13:35:41.193674+0000 mon.vm06 (mon.0) 798 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:42.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:41 vm06 bash[28114]: audit 2026-04-15T13:35:41.194340+0000 mon.vm06 (mon.0) 799 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:42.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:41 vm06 bash[28114]: audit 2026-04-15T13:35:41.195008+0000 mon.vm06 (mon.0) 800 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:42.264 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json
2026-04-15T13:35:42.492 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:42 vm09 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:35:42.559 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:42.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:42 vm09 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
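The roughly 4 KB orch ls JSON dump recurs on every poll iteration below; between iterations only the running counters and a few event timestamps change (compare the ingress.rgw.foo event timestamp in the next dump against the earlier ones). When reading these dumps it is usually enough to reduce each service to its running/size pair; a small hypothetical filter over fields that are actually present in the output ("service_name", "status"."running", "status"."size"):

    # Condense "ceph orch ls -f json" output to one running/size line per service.
    # Usage (illustrative): ceph orch ls -f json | python3 summarize_orch_ls.py
    import json
    import sys

    for svc in json.loads(sys.stdin.read()):
        status = svc.get("status", {})
        print("%-28s %s/%s" % (svc["service_name"],
                               status.get("running", 0),
                               status.get("size", "?")))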
2026-04-15T13:35:42.992 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:35:42.992 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:22.852862Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:22.243225Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:22.242981Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:22.852792Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:42.641667Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:22.243081Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:22.243273Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:22.243030Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:22.242842Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T13:33:17.901263Z", "last_refresh": "2026-04-15T13:35:22.852461Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:29.362346Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T13:35:29.356602Z", "ports": [8000, 8001], "running": 0, "size": 4}}]
2026-04-15T13:35:43.064 INFO:tasks.cephadm:rgw.foo has 0/4
2026-04-15T13:35:43.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:42 vm06 bash[28114]: cluster 2026-04-15T13:35:42.176813+0000 mgr.vm06.qbbldl (mgr.14229) 142 : cluster [DBG] pgmap v68: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 321 KiB/s rd, 8.1 KiB/s wr, 592 op/s
2026-04-15T13:35:43.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:42 vm06 bash[28114]: audit 2026-04-15T13:35:42.631330+0000 mon.vm06 (mon.0) 801 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:43.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:42 vm06 bash[28114]: audit 2026-04-15T13:35:42.636763+0000 mon.vm06 (mon.0) 802 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:43.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:42 vm06 bash[28114]: audit 2026-04-15T13:35:42.641081+0000 mon.vm06 (mon.0) 803 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:43.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:42 vm09 bash[34466]: cluster 2026-04-15T13:35:42.176813+0000 mgr.vm06.qbbldl (mgr.14229) 142 : cluster [DBG] pgmap v68: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 321 KiB/s rd, 8.1 KiB/s wr, 592 op/s
2026-04-15T13:35:43.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:42 vm09 bash[34466]: audit 2026-04-15T13:35:42.631330+0000 mon.vm06 (mon.0) 801 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:43.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:42 vm09 bash[34466]: audit 2026-04-15T13:35:42.636763+0000 mon.vm06 (mon.0) 802 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:43.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:42 vm09 bash[34466]: audit 2026-04-15T13:35:42.641081+0000 mon.vm06 (mon.0) 803 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:44.065 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json
2026-04-15T13:35:44.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:43 vm06 bash[28114]: cephadm 2026-04-15T13:35:42.642499+0000 mgr.vm06.qbbldl (mgr.14229) 143 : cephadm [INF] 12.12.1.106 is in 12.12.0.0/22 on vm06 interface ens3
2026-04-15T13:35:44.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:43 vm06 bash[28114]: cephadm 2026-04-15T13:35:42.642538+0000 mgr.vm06.qbbldl (mgr.14229) 144 : cephadm [INF] 12.12.1.106 is in 12.12.0.0/22 on vm09 interface ens3
2026-04-15T13:35:44.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:43 vm06 bash[28114]: cephadm 2026-04-15T13:35:42.642918+0000 mgr.vm06.qbbldl (mgr.14229) 145 : cephadm [INF] Deploying daemon keepalived.rgw.foo.vm06.mhhxjk on vm06
2026-04-15T13:35:44.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:43 vm06 bash[28114]: audit 2026-04-15T13:35:42.983945+0000 mgr.vm06.qbbldl (mgr.14229) 146 : audit [DBG] from='client.14680 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:44.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:43 vm06 bash[28114]: audit 2026-04-15T13:35:42.984604+0000 mon.vm06 (mon.0) 804 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:44.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:43 vm06 bash[28114]: audit 2026-04-15T13:35:42.985319+0000 mon.vm06 (mon.0) 805 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:44.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:43 vm06 bash[28114]: audit 2026-04-15T13:35:42.988997+0000 mon.vm06 (mon.0) 806 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:44.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:43 vm06 bash[28114]: audit 2026-04-15T13:35:42.990370+0000 mon.vm06 (mon.0) 807 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:43 vm09 bash[34466]: cephadm 2026-04-15T13:35:42.642499+0000 mgr.vm06.qbbldl (mgr.14229) 143 : cephadm [INF] 12.12.1.106 is in 12.12.0.0/22 on vm06 interface ens3
2026-04-15T13:35:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:43 vm09 bash[34466]: cephadm 2026-04-15T13:35:42.642538+0000 mgr.vm06.qbbldl (mgr.14229) 144 : cephadm [INF] 12.12.1.106 is in 12.12.0.0/22 on vm09 interface ens3
2026-04-15T13:35:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:43 vm09 bash[34466]: cephadm 2026-04-15T13:35:42.642538+0000 mgr.vm06.qbbldl (mgr.14229) 144 : cephadm [INF] 12.12.1.106 is in 12.12.0.0/22 on vm09 interface ens3 2026-04-15T13:35:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:43 vm09 bash[34466]: cephadm 2026-04-15T13:35:42.642918+0000 mgr.vm06.qbbldl (mgr.14229) 145 : cephadm [INF] Deploying daemon keepalived.rgw.foo.vm06.mhhxjk on vm06 2026-04-15T13:35:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:43 vm09 bash[34466]: cephadm 2026-04-15T13:35:42.642918+0000 mgr.vm06.qbbldl (mgr.14229) 145 : cephadm [INF] Deploying daemon keepalived.rgw.foo.vm06.mhhxjk on vm06 2026-04-15T13:35:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:43 vm09 bash[34466]: audit 2026-04-15T13:35:42.983945+0000 mgr.vm06.qbbldl (mgr.14229) 146 : audit [DBG] from='client.14680 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:43 vm09 bash[34466]: audit 2026-04-15T13:35:42.983945+0000 mgr.vm06.qbbldl (mgr.14229) 146 : audit [DBG] from='client.14680 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:43 vm09 bash[34466]: audit 2026-04-15T13:35:42.984604+0000 mon.vm06 (mon.0) 804 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:43 vm09 bash[34466]: audit 2026-04-15T13:35:42.984604+0000 mon.vm06 (mon.0) 804 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:43 vm09 bash[34466]: audit 2026-04-15T13:35:42.985319+0000 mon.vm06 (mon.0) 805 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:43 vm09 bash[34466]: audit 2026-04-15T13:35:42.985319+0000 mon.vm06 (mon.0) 805 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:43 vm09 bash[34466]: audit 2026-04-15T13:35:42.988997+0000 mon.vm06 (mon.0) 806 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:43 vm09 bash[34466]: audit 2026-04-15T13:35:42.988997+0000 mon.vm06 (mon.0) 806 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:43 vm09 bash[34466]: audit 
2026-04-15T13:35:42.990370+0000 mon.vm06 (mon.0) 807 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:43 vm09 bash[34466]: audit 2026-04-15T13:35:42.990370+0000 mon.vm06 (mon.0) 807 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:44.364 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:44.779 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:35:44.779 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:22.852862Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:22.243225Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:22.242981Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:22.852792Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:42.641667Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:22.243081Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:22.243273Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:22.243030Z", "ports": [9100], "running": 2, 
"size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:22.242842Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T13:33:17.901263Z", "last_refresh": "2026-04-15T13:35:22.852461Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:29.362346Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T13:35:29.356602Z", "ports": [8000, 8001], "running": 0, "size": 4}}] 2026-04-15T13:35:44.872 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-15T13:35:45.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:44 vm06 bash[28114]: cluster 2026-04-15T13:35:44.177189+0000 mgr.vm06.qbbldl (mgr.14229) 147 : cluster [DBG] pgmap v69: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 264 KiB/s rd, 6.7 KiB/s wr, 487 op/s 2026-04-15T13:35:45.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:44 vm06 bash[28114]: cluster 2026-04-15T13:35:44.177189+0000 mgr.vm06.qbbldl (mgr.14229) 147 : cluster [DBG] pgmap v69: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 264 KiB/s rd, 6.7 KiB/s wr, 487 op/s 2026-04-15T13:35:45.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:44 vm06 bash[28114]: audit 2026-04-15T13:35:44.775102+0000 mon.vm06 (mon.0) 808 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:45.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:44 vm06 bash[28114]: audit 2026-04-15T13:35:44.775102+0000 mon.vm06 (mon.0) 808 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:45.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:44 vm06 bash[28114]: audit 2026-04-15T13:35:44.775898+0000 mon.vm06 (mon.0) 809 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:45.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:44 vm06 bash[28114]: audit 2026-04-15T13:35:44.775898+0000 mon.vm06 (mon.0) 809 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:45.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:44 vm06 bash[28114]: audit 2026-04-15T13:35:44.776428+0000 mon.vm06 (mon.0) 810 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": 
"client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:45.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:44 vm06 bash[28114]: audit 2026-04-15T13:35:44.776428+0000 mon.vm06 (mon.0) 810 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:45.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:44 vm06 bash[28114]: audit 2026-04-15T13:35:44.776904+0000 mon.vm06 (mon.0) 811 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:45.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:44 vm06 bash[28114]: audit 2026-04-15T13:35:44.776904+0000 mon.vm06 (mon.0) 811 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:45.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:44 vm09 bash[34466]: cluster 2026-04-15T13:35:44.177189+0000 mgr.vm06.qbbldl (mgr.14229) 147 : cluster [DBG] pgmap v69: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 264 KiB/s rd, 6.7 KiB/s wr, 487 op/s 2026-04-15T13:35:45.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:44 vm09 bash[34466]: cluster 2026-04-15T13:35:44.177189+0000 mgr.vm06.qbbldl (mgr.14229) 147 : cluster [DBG] pgmap v69: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 264 KiB/s rd, 6.7 KiB/s wr, 487 op/s 2026-04-15T13:35:45.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:44 vm09 bash[34466]: audit 2026-04-15T13:35:44.775102+0000 mon.vm06 (mon.0) 808 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:45.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:44 vm09 bash[34466]: audit 2026-04-15T13:35:44.775102+0000 mon.vm06 (mon.0) 808 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:45.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:44 vm09 bash[34466]: audit 2026-04-15T13:35:44.775898+0000 mon.vm06 (mon.0) 809 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:45.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:44 vm09 bash[34466]: audit 2026-04-15T13:35:44.775898+0000 mon.vm06 (mon.0) 809 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:45.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:44 vm09 bash[34466]: audit 2026-04-15T13:35:44.776428+0000 mon.vm06 (mon.0) 810 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:45.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:44 vm09 
bash[34466]: audit 2026-04-15T13:35:44.776428+0000 mon.vm06 (mon.0) 810 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:45.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:44 vm09 bash[34466]: audit 2026-04-15T13:35:44.776904+0000 mon.vm06 (mon.0) 811 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:45.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:44 vm09 bash[34466]: audit 2026-04-15T13:35:44.776904+0000 mon.vm06 (mon.0) 811 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:45.873 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json 2026-04-15T13:35:46.150 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:46.182 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:45 vm06 bash[28114]: audit 2026-04-15T13:35:44.774060+0000 mgr.vm06.qbbldl (mgr.14229) 148 : audit [DBG] from='client.14684 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:46.183 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:45 vm06 bash[28114]: audit 2026-04-15T13:35:44.774060+0000 mgr.vm06.qbbldl (mgr.14229) 148 : audit [DBG] from='client.14684 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:46.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:45 vm09 bash[34466]: audit 2026-04-15T13:35:44.774060+0000 mgr.vm06.qbbldl (mgr.14229) 148 : audit [DBG] from='client.14684 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:46.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:45 vm09 bash[34466]: audit 2026-04-15T13:35:44.774060+0000 mgr.vm06.qbbldl (mgr.14229) 148 : audit [DBG] from='client.14684 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:46.875 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:35:46.876 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:22.852862Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:22.243225Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": 
"*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:22.242981Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:22.852792Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:42.641667Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:22.243081Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:22.243273Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:22.243030Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:22.242842Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T13:33:17.901263Z", "last_refresh": "2026-04-15T13:35:22.852461Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:29.362346Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T13:35:29.356602Z", "ports": [8000, 8001], "running": 0, "size": 4}}] 2026-04-15T13:35:46.979 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-15T13:35:47.049 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:46 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. 
Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:35:47.049 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:47 vm06 bash[28114]: cluster 2026-04-15T13:35:46.177604+0000 mgr.vm06.qbbldl (mgr.14229) 149 : cluster [DBG] pgmap v70: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 239 KiB/s rd, 6.0 KiB/s wr, 441 op/s
2026-04-15T13:35:47.049 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:47 vm06 bash[28114]: audit 2026-04-15T13:35:46.870753+0000 mon.vm06 (mon.0) 812 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:47.049 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:47 vm06 bash[28114]: audit 2026-04-15T13:35:46.872752+0000 mon.vm06 (mon.0) 813 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:47.049 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:47 vm06 bash[28114]: audit 2026-04-15T13:35:46.873235+0000 mon.vm06 (mon.0) 814 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:47.049 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:47 vm06 bash[28114]: audit 2026-04-15T13:35:46.873718+0000 mon.vm06 (mon.0) 815 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:47.234 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:47 vm06 systemd[1]: /etc/systemd/system/ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service:23: Unit configured to use KillMode=none. This is unsafe, as it disables systemd's process lifecycle management for the service. Please update your service to use a safer KillMode=, such as 'mixed' or 'control-group'. Support for KillMode=none is deprecated and will eventually be removed.
2026-04-15T13:35:47.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:47 vm09 bash[34466]: cluster 2026-04-15T13:35:46.177604+0000 mgr.vm06.qbbldl (mgr.14229) 149 : cluster [DBG] pgmap v70: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 239 KiB/s rd, 6.0 KiB/s wr, 441 op/s
2026-04-15T13:35:47.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:47 vm09 bash[34466]: audit 2026-04-15T13:35:46.870753+0000 mon.vm06 (mon.0) 812 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:47.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:47 vm09 bash[34466]: audit 2026-04-15T13:35:46.872752+0000 mon.vm06 (mon.0) 813 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:47.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:47 vm09 bash[34466]: audit 2026-04-15T13:35:46.873235+0000 mon.vm06 (mon.0) 814 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:47.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:47 vm09 bash[34466]: audit 2026-04-15T13:35:46.873718+0000 mon.vm06 (mon.0) 815 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
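[editor's note] The two systemd complaints above refer to the ceph-<fsid>@.service template cephadm generated for this cluster; cephadm sets KillMode=none deliberately (podman, not systemd, owns the container processes), so in this run the warning is deprecation noise rather than a failure. For reference only, a drop-in is the generic way to move a unit off the deprecated setting — a sketch, not something this job does, and the 10-killmode.conf name and KillMode=mixed choice are assumptions:

  # Hypothetical drop-in; NOT applied by this test. Overriding cephadm's
  # KillMode=none changes who reaps the container processes, so use only
  # on a throwaway cluster to silence the deprecation warning.
  unit=ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@.service
  sudo mkdir -p /etc/systemd/system/${unit}.d
  printf '[Service]\nKillMode=mixed\n' | sudo tee /etc/systemd/system/${unit}.d/10-killmode.conf
  sudo systemctl daemon-reload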
2026-04-15T13:35:47.979 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json
2026-04-15T13:35:48.235 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:48.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:48 vm06 bash[28114]: audit 2026-04-15T13:35:46.870074+0000 mgr.vm06.qbbldl (mgr.14229) 150 : audit [DBG] from='client.14688 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:48.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:48 vm06 bash[28114]: audit 2026-04-15T13:35:47.182924+0000 mon.vm06 (mon.0) 816 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:48.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:48 vm06 bash[28114]: audit 2026-04-15T13:35:47.188686+0000 mon.vm06 (mon.0) 817 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:48.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:48 vm06 bash[28114]: audit 2026-04-15T13:35:47.193376+0000 mon.vm06 (mon.0) 818 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:48.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:48 vm06 bash[28114]: audit 2026-04-15T13:35:47.197980+0000 mon.vm06 (mon.0) 819 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:48.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:48 vm06 bash[28114]: audit 2026-04-15T13:35:47.214292+0000 mon.vm06 (mon.0) 820 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:35:48.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:48 vm09 bash[34466]: audit 2026-04-15T13:35:46.870074+0000 mgr.vm06.qbbldl (mgr.14229) 150 : audit [DBG] from='client.14688 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:48.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:48 vm09 bash[34466]: audit 2026-04-15T13:35:47.182924+0000 mon.vm06 (mon.0) 816 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:48.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:48 vm09 bash[34466]: audit 2026-04-15T13:35:47.188686+0000 mon.vm06 (mon.0) 817 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:48.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:48 vm09 bash[34466]: audit 2026-04-15T13:35:47.193376+0000 mon.vm06 (mon.0) 818 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:48.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:48 vm09 bash[34466]: audit 2026-04-15T13:35:47.197980+0000 mon.vm06 (mon.0) 819 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:48.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:48 vm09 bash[34466]: audit 2026-04-15T13:35:47.214292+0000 mon.vm06 (mon.0) 820 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:35:48.625 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:35:48.625 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:22.852862Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:22.243225Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:22.242981Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:22.852792Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:47.198372Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:22.243081Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:22.243273Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:22.243030Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:22.242842Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": 
{"created": "2026-04-15T13:33:17.901263Z", "last_refresh": "2026-04-15T13:35:22.852461Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:29.362346Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T13:35:29.356602Z", "ports": [8000, 8001], "running": 0, "size": 4}}] 2026-04-15T13:35:48.691 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-15T13:35:49.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:49 vm06 bash[28114]: cluster 2026-04-15T13:35:48.178001+0000 mgr.vm06.qbbldl (mgr.14229) 151 : cluster [DBG] pgmap v71: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 213 KiB/s rd, 5.3 KiB/s wr, 394 op/s 2026-04-15T13:35:49.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:49 vm06 bash[28114]: cluster 2026-04-15T13:35:48.178001+0000 mgr.vm06.qbbldl (mgr.14229) 151 : cluster [DBG] pgmap v71: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 213 KiB/s rd, 5.3 KiB/s wr, 394 op/s 2026-04-15T13:35:49.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:49 vm06 bash[28114]: audit 2026-04-15T13:35:48.621750+0000 mon.vm06 (mon.0) 821 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:49.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:49 vm06 bash[28114]: audit 2026-04-15T13:35:48.621750+0000 mon.vm06 (mon.0) 821 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:49.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:49 vm06 bash[28114]: audit 2026-04-15T13:35:48.622428+0000 mon.vm06 (mon.0) 822 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:49.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:49 vm06 bash[28114]: audit 2026-04-15T13:35:48.622428+0000 mon.vm06 (mon.0) 822 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:49.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:49 vm06 bash[28114]: audit 2026-04-15T13:35:48.622897+0000 mon.vm06 (mon.0) 823 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:49.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:49 vm06 bash[28114]: audit 2026-04-15T13:35:48.622897+0000 mon.vm06 (mon.0) 823 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:49.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:49 vm06 bash[28114]: audit 2026-04-15T13:35:48.623333+0000 mon.vm06 (mon.0) 824 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", 
"who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:49.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:49 vm06 bash[28114]: audit 2026-04-15T13:35:48.623333+0000 mon.vm06 (mon.0) 824 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:49 vm09 bash[34466]: cluster 2026-04-15T13:35:48.178001+0000 mgr.vm06.qbbldl (mgr.14229) 151 : cluster [DBG] pgmap v71: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 213 KiB/s rd, 5.3 KiB/s wr, 394 op/s 2026-04-15T13:35:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:49 vm09 bash[34466]: cluster 2026-04-15T13:35:48.178001+0000 mgr.vm06.qbbldl (mgr.14229) 151 : cluster [DBG] pgmap v71: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 213 KiB/s rd, 5.3 KiB/s wr, 394 op/s 2026-04-15T13:35:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:49 vm09 bash[34466]: audit 2026-04-15T13:35:48.621750+0000 mon.vm06 (mon.0) 821 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:49 vm09 bash[34466]: audit 2026-04-15T13:35:48.621750+0000 mon.vm06 (mon.0) 821 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:49 vm09 bash[34466]: audit 2026-04-15T13:35:48.622428+0000 mon.vm06 (mon.0) 822 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:49 vm09 bash[34466]: audit 2026-04-15T13:35:48.622428+0000 mon.vm06 (mon.0) 822 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:49 vm09 bash[34466]: audit 2026-04-15T13:35:48.622897+0000 mon.vm06 (mon.0) 823 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:49 vm09 bash[34466]: audit 2026-04-15T13:35:48.622897+0000 mon.vm06 (mon.0) 823 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:49 vm09 bash[34466]: audit 2026-04-15T13:35:48.623333+0000 mon.vm06 (mon.0) 824 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:49 
vm09 bash[34466]: audit 2026-04-15T13:35:48.623333+0000 mon.vm06 (mon.0) 824 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:49.692 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json 2026-04-15T13:35:49.980 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:50.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:50 vm06 bash[28114]: audit 2026-04-15T13:35:48.621025+0000 mgr.vm06.qbbldl (mgr.14229) 152 : audit [DBG] from='client.24405 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:50.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:50 vm06 bash[28114]: audit 2026-04-15T13:35:48.621025+0000 mgr.vm06.qbbldl (mgr.14229) 152 : audit [DBG] from='client.24405 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:50.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:50 vm06 bash[28114]: audit 2026-04-15T13:35:49.903743+0000 mon.vm06 (mon.0) 825 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:50.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:50 vm06 bash[28114]: audit 2026-04-15T13:35:49.903743+0000 mon.vm06 (mon.0) 825 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:50.375 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:35:50.375 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:22.852862Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:22.243225Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:22.242981Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:22.852792Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:47.198372Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": 
"12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "ports": [9000, 9001], "running": 0, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:22.243081Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:22.243273Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:22.243030Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:22.242842Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T13:33:17.901263Z", "last_refresh": "2026-04-15T13:35:22.852461Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:29.362346Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T13:35:29.356602Z", "ports": [8000, 8001], "running": 0, "size": 4}}] 2026-04-15T13:35:50.451 INFO:tasks.cephadm:rgw.foo has 0/4 2026-04-15T13:35:50.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:50 vm09 bash[34466]: audit 2026-04-15T13:35:48.621025+0000 mgr.vm06.qbbldl (mgr.14229) 152 : audit [DBG] from='client.24405 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:50.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:50 vm09 bash[34466]: audit 2026-04-15T13:35:48.621025+0000 mgr.vm06.qbbldl (mgr.14229) 152 : audit [DBG] from='client.24405 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:50.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:50 vm09 bash[34466]: audit 2026-04-15T13:35:49.903743+0000 mon.vm06 (mon.0) 825 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:50.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:50 vm09 bash[34466]: audit 2026-04-15T13:35:49.903743+0000 mon.vm06 (mon.0) 825 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:51.452 DEBUG:teuthology.orchestra.run.vm06:> sudo 
/home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json
2026-04-15T13:35:51.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:51 vm06 bash[28114]: cluster 2026-04-15T13:35:50.178479+0000 mgr.vm06.qbbldl (mgr.14229) 153 : cluster [DBG] pgmap v72: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 104 KiB/s rd, 2.4 KiB/s wr, 191 op/s
2026-04-15T13:35:51.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:51 vm06 bash[28114]: audit 2026-04-15T13:35:50.372071+0000 mon.vm06 (mon.0) 826 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:51.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:51 vm06 bash[28114]: audit 2026-04-15T13:35:50.372953+0000 mon.vm06 (mon.0) 827 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:51.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:51 vm06 bash[28114]: audit 2026-04-15T13:35:50.373371+0000 mon.vm06 (mon.0) 828 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:51.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:51 vm06 bash[28114]: audit 2026-04-15T13:35:50.373764+0000 mon.vm06 (mon.0) 829 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:51.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:51 vm09 bash[34466]: cluster 2026-04-15T13:35:50.178479+0000 mgr.vm06.qbbldl (mgr.14229) 153 : cluster [DBG] pgmap v72: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 104 KiB/s rd, 2.4 KiB/s wr, 191 op/s
2026-04-15T13:35:51.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:51 vm09 bash[34466]: audit 2026-04-15T13:35:50.372071+0000 mon.vm06 (mon.0) 826 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:51.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:51 vm09 bash[34466]: audit 2026-04-15T13:35:50.372953+0000 mon.vm06 (mon.0) 827 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:51.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:51 vm09 bash[34466]: audit 2026-04-15T13:35:50.373371+0000 mon.vm06 (mon.0) 828 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:51.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:51 vm09 bash[34466]: audit 2026-04-15T13:35:50.373764+0000 mon.vm06 (mon.0) 829 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:51.726
INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:52.179 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:35:52.179 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:22.852862Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:22.852831Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:22.852761Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:22.852792Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:47.198372Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "last_refresh": "2026-04-15T13:35:52.083240Z", "ports": [9000, 9001], "running": 2, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:22.852615Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:22.852652Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:22.852684Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:22.852402Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service 
was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T13:33:17.901263Z", "last_refresh": "2026-04-15T13:35:22.852461Z", "ports": [9095], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:29.362346Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T13:35:29.356602Z", "last_refresh": "2026-04-15T13:35:52.083087Z", "ports": [8000, 8001], "running": 2, "size": 4}}] 2026-04-15T13:35:52.305 INFO:tasks.cephadm:rgw.foo has 2/4 2026-04-15T13:35:52.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:52 vm06 bash[28114]: audit 2026-04-15T13:35:50.371339+0000 mgr.vm06.qbbldl (mgr.14229) 154 : audit [DBG] from='client.14696 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:52.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:52 vm06 bash[28114]: audit 2026-04-15T13:35:50.371339+0000 mgr.vm06.qbbldl (mgr.14229) 154 : audit [DBG] from='client.14696 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:52.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:52 vm06 bash[28114]: audit 2026-04-15T13:35:52.090178+0000 mon.vm06 (mon.0) 830 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:52.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:52 vm06 bash[28114]: audit 2026-04-15T13:35:52.090178+0000 mon.vm06 (mon.0) 830 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:52.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:52 vm06 bash[28114]: audit 2026-04-15T13:35:52.095917+0000 mon.vm06 (mon.0) 831 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:52.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:52 vm06 bash[28114]: audit 2026-04-15T13:35:52.095917+0000 mon.vm06 (mon.0) 831 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:52.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:52 vm06 bash[28114]: audit 2026-04-15T13:35:52.173293+0000 mon.vm06 (mon.0) 832 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:52.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:52 vm06 bash[28114]: audit 2026-04-15T13:35:52.173293+0000 mon.vm06 (mon.0) 832 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:52.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:52 vm06 bash[28114]: audit 2026-04-15T13:35:52.174021+0000 mon.vm06 (mon.0) 833 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:52.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:52 vm06 bash[28114]: audit 2026-04-15T13:35:52.174021+0000 mon.vm06 (mon.0) 833 : audit [DBG] 
from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:52.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:52 vm06 bash[28114]: audit 2026-04-15T13:35:52.174512+0000 mon.vm06 (mon.0) 834 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:52.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:52 vm06 bash[28114]: audit 2026-04-15T13:35:52.174512+0000 mon.vm06 (mon.0) 834 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:52.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:52 vm06 bash[28114]: audit 2026-04-15T13:35:52.175016+0000 mon.vm06 (mon.0) 835 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:52.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:52 vm06 bash[28114]: audit 2026-04-15T13:35:52.175016+0000 mon.vm06 (mon.0) 835 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:52.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:52 vm09 bash[34466]: audit 2026-04-15T13:35:50.371339+0000 mgr.vm06.qbbldl (mgr.14229) 154 : audit [DBG] from='client.14696 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:52.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:52 vm09 bash[34466]: audit 2026-04-15T13:35:50.371339+0000 mgr.vm06.qbbldl (mgr.14229) 154 : audit [DBG] from='client.14696 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch 2026-04-15T13:35:52.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:52 vm09 bash[34466]: audit 2026-04-15T13:35:52.090178+0000 mon.vm06 (mon.0) 830 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:52.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:52 vm09 bash[34466]: audit 2026-04-15T13:35:52.090178+0000 mon.vm06 (mon.0) 830 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:52.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:52 vm09 bash[34466]: audit 2026-04-15T13:35:52.095917+0000 mon.vm06 (mon.0) 831 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:52.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:52 vm09 bash[34466]: audit 2026-04-15T13:35:52.095917+0000 mon.vm06 (mon.0) 831 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:52.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:52 vm09 bash[34466]: audit 2026-04-15T13:35:52.173293+0000 mon.vm06 (mon.0) 832 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:52.609 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:52 vm09 bash[34466]: audit 2026-04-15T13:35:52.173293+0000 mon.vm06 (mon.0) 832 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:52.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:52 vm09 bash[34466]: audit 2026-04-15T13:35:52.174021+0000 mon.vm06 (mon.0) 833 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:52.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:52 vm09 bash[34466]: audit 2026-04-15T13:35:52.174512+0000 mon.vm06 (mon.0) 834 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:52.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:52 vm09 bash[34466]: audit 2026-04-15T13:35:52.175016+0000 mon.vm06 (mon.0) 835 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:53.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:53 vm06 bash[28114]: audit 2026-04-15T13:35:52.172577+0000 mgr.vm06.qbbldl (mgr.14229) 155 : audit [DBG] from='client.14700 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:53.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:53 vm06 bash[28114]: cluster 2026-04-15T13:35:52.178781+0000 mgr.vm06.qbbldl (mgr.14229) 156 : cluster [DBG] pgmap v73: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:35:53.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:53 vm06 bash[28114]: audit 2026-04-15T13:35:52.645803+0000 mon.vm06 (mon.0) 836 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:53.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:53 vm06 bash[28114]: audit 2026-04-15T13:35:52.650737+0000 mon.vm06 (mon.0) 837 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:53.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:53 vm06 bash[28114]: audit 2026-04-15T13:35:52.651549+0000 mon.vm06 (mon.0) 838 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:35:53.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:53 vm06 bash[28114]: audit 2026-04-15T13:35:52.651996+0000 mon.vm06 (mon.0) 839 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:35:53.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:53 vm06 bash[28114]: audit 2026-04-15T13:35:53.007547+0000 mon.vm06 (mon.0) 840 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:53.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:53 vm06 bash[28114]: audit 2026-04-15T13:35:53.013389+0000 mon.vm06 (mon.0) 841 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:53.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:53 vm06 bash[28114]: audit 2026-04-15T13:35:53.022720+0000 mon.vm06 (mon.0) 842 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:53.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:53 vm06 bash[28114]: audit 2026-04-15T13:35:53.025364+0000 mon.vm06 (mon.0) 843 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:35:53.306 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json
2026-04-15T13:35:53.604 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:53.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:53 vm09 bash[34466]: audit 2026-04-15T13:35:52.172577+0000 mgr.vm06.qbbldl (mgr.14229) 155 : audit [DBG] from='client.14700 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:53.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:53 vm09 bash[34466]: cluster 2026-04-15T13:35:52.178781+0000 mgr.vm06.qbbldl (mgr.14229) 156 : cluster [DBG] pgmap v73: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:35:53.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:53 vm09 bash[34466]: audit 2026-04-15T13:35:52.645803+0000 mon.vm06 (mon.0) 836 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:53.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:53 vm09 bash[34466]: audit 2026-04-15T13:35:52.650737+0000 mon.vm06 (mon.0) 837 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
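Every `ceph` command in this run goes through the `cephadm shell` wrapper pinned to the test container image and cluster fsid, as in the `orch ls` invocation logged above. The same command, reflowed with line continuations for readability (no new flags or paths; everything here is taken from the DEBUG line):

    sudo /home/ubuntu/cephtest/cephadm \
        --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 \
        shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring \
        --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b \
        -- ceph orch ls -f json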
2026-04-15T13:35:53.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:53 vm09 bash[34466]: audit 2026-04-15T13:35:52.651549+0000 mon.vm06 (mon.0) 838 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:35:53.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:53 vm09 bash[34466]: audit 2026-04-15T13:35:52.651996+0000 mon.vm06 (mon.0) 839 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:35:53.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:53 vm09 bash[34466]: audit 2026-04-15T13:35:53.007547+0000 mon.vm06 (mon.0) 840 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:53.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:53 vm09 bash[34466]: audit 2026-04-15T13:35:53.013389+0000 mon.vm06 (mon.0) 841 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:53.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:53 vm09 bash[34466]: audit 2026-04-15T13:35:53.022720+0000 mon.vm06 (mon.0) 842 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:53.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:53 vm09 bash[34466]: audit 2026-04-15T13:35:53.025364+0000 mon.vm06 (mon.0) 843 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:35:54.098 INFO:teuthology.orchestra.run.vm06.stdout:
2026-04-15T13:35:54.099 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:52.639216Z", "ports": [9093, 9094],
"running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:52.083394Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:52.083175Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:52.639156Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:47.198372Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "last_refresh": "2026-04-15T13:35:52.083240Z", "ports": [9000, 9001], "running": 4, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:52.083306Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:52.083423Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:52.083208Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:52.083030Z", "running": 8, "size": 8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T13:33:17.901263Z", "ports": [9095], "running": 0, "size": 1}}, {"events": ["2026-04-15T13:35:29.362346Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 
8000}, "status": {"created": "2026-04-15T13:35:29.356602Z", "last_refresh": "2026-04-15T13:35:52.083087Z", "ports": [8000, 8001], "running": 4, "size": 4}}] 2026-04-15T13:35:54.224 INFO:tasks.cephadm:rgw.foo has 4/4 2026-04-15T13:35:54.224 INFO:teuthology.run_tasks:Running task cephadm.wait_for_service... 2026-04-15T13:35:54.226 INFO:tasks.cephadm:Waiting for ceph service ingress.rgw.foo to start (timeout 300)... 2026-04-15T13:35:54.227 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph orch ls -f json 2026-04-15T13:35:54.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: cephadm 2026-04-15T13:35:52.654347+0000 mgr.vm06.qbbldl (mgr.14229) 157 : cephadm [INF] Checking dashboard <-> RGW credentials 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: cephadm 2026-04-15T13:35:52.654347+0000 mgr.vm06.qbbldl (mgr.14229) 157 : cephadm [INF] Checking dashboard <-> RGW credentials 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: cephadm 2026-04-15T13:35:53.041865+0000 mgr.vm06.qbbldl (mgr.14229) 158 : cephadm [INF] Reconfiguring prometheus.vm06 deps ['8765', '9283', 'alertmanager', 'ceph-exporter.vm06', 'ceph-exporter.vm09', 'mgr.vm06.qbbldl', 'node-exporter', 'secure_monitoring_stack:False'] -> ['8765', '9283', 'alertmanager', 'ceph-exporter.vm06', 'ceph-exporter.vm09', 'ingress', 'mgr.vm06.qbbldl', 'node-exporter', 'secure_monitoring_stack:False'] (diff {'ingress'}) 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: cephadm 2026-04-15T13:35:53.041865+0000 mgr.vm06.qbbldl (mgr.14229) 158 : cephadm [INF] Reconfiguring prometheus.vm06 deps ['8765', '9283', 'alertmanager', 'ceph-exporter.vm06', 'ceph-exporter.vm09', 'mgr.vm06.qbbldl', 'node-exporter', 'secure_monitoring_stack:False'] -> ['8765', '9283', 'alertmanager', 'ceph-exporter.vm06', 'ceph-exporter.vm09', 'ingress', 'mgr.vm06.qbbldl', 'node-exporter', 'secure_monitoring_stack:False'] (diff {'ingress'}) 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: cephadm 2026-04-15T13:35:53.194847+0000 mgr.vm06.qbbldl (mgr.14229) 159 : cephadm [INF] Reconfiguring daemon prometheus.vm06 on vm06 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: cephadm 2026-04-15T13:35:53.194847+0000 mgr.vm06.qbbldl (mgr.14229) 159 : cephadm [INF] Reconfiguring daemon prometheus.vm06 on vm06 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:53.393532+0000 mon.vm06 (mon.0) 844 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:53.393532+0000 mon.vm06 (mon.0) 844 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:53.481658+0000 mon.vm06 (mon.0) 845 : audit [DBG] from='mgr.14229 
192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:53.481658+0000 mon.vm06 (mon.0) 845 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:53.508924+0000 mon.vm06 (mon.0) 846 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:53.508924+0000 mon.vm06 (mon.0) 846 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:53.981330+0000 mon.vm06 (mon.0) 847 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:53.981330+0000 mon.vm06 (mon.0) 847 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:53.988474+0000 mon.vm06 (mon.0) 848 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:53.988474+0000 mon.vm06 (mon.0) 848 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:53.995578+0000 mon.vm06 (mon.0) 849 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:53.995578+0000 mon.vm06 (mon.0) 849 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.002004+0000 mon.vm06 (mon.0) 850 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.002004+0000 mon.vm06 (mon.0) 850 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.008288+0000 mon.vm06 (mon.0) 851 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.008288+0000 mon.vm06 (mon.0) 851 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 
bash[28114]: audit 2026-04-15T13:35:54.014784+0000 mon.vm06 (mon.0) 852 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.014784+0000 mon.vm06 (mon.0) 852 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.026131+0000 mon.vm06 (mon.0) 853 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.026131+0000 mon.vm06 (mon.0) 853 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.038507+0000 mon.vm06 (mon.0) 854 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.038507+0000 mon.vm06 (mon.0) 854 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.045474+0000 mon.vm06 (mon.0) 855 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.045474+0000 mon.vm06 (mon.0) 855 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.052382+0000 mon.vm06 (mon.0) 856 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.052382+0000 mon.vm06 (mon.0) 856 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.502 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.055858+0000 mon.vm06 (mon.0) 857 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:35:54.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.055858+0000 mon.vm06 (mon.0) 857 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:35:54.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.087548+0000 mon.vm06 (mon.0) 858 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:54.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.087548+0000 mon.vm06 (mon.0) 858 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 
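The cephadm.wait_for_service task keeps re-running the `ceph orch ls -f json` call shown in the DEBUG line above and declares success once the service's `status.running` equals `status.size` (the "rgw.foo has 4/4" / "ingress.rgw.foo has 4/4" messages). teuthology's tasks.cephadm does this in Python; a minimal stand-alone sketch of the same poll, assuming `jq` is available in the shell:

    # Poll a cephadm service until all expected daemons are up (sketch; assumes jq).
    SERVICE=ingress.rgw.foo          # service_name as reported by 'ceph orch ls'
    for _ in $(seq 1 60); do         # ~300 s at 5 s intervals, matching the task timeout
      status=$(ceph orch ls -f json |
        jq -r --arg s "$SERVICE" '.[] | select(.service_name == $s) | "\(.status.running)/\(.status.size)"')
      echo "$SERVICE has $status"
      case "$status" in 4/4) break ;; esac   # 4/4 matches this run: count 2 -> 2 haproxy + 2 keepalived
      sleep 5
    done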
cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:54.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.093077+0000 mon.vm06 (mon.0) 859 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:54.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.093077+0000 mon.vm06 (mon.0) 859 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:54.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.094189+0000 mon.vm06 (mon.0) 860 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:54.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.094189+0000 mon.vm06 (mon.0) 860 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:54.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.094971+0000 mon.vm06 (mon.0) 861 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:54.503 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:54 vm06 bash[28114]: audit 2026-04-15T13:35:54.094971+0000 mon.vm06 (mon.0) 861 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:54.507 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:35:54.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: cephadm 2026-04-15T13:35:52.654347+0000 mgr.vm06.qbbldl (mgr.14229) 157 : cephadm [INF] Checking dashboard <-> RGW credentials 2026-04-15T13:35:54.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: cephadm 2026-04-15T13:35:52.654347+0000 mgr.vm06.qbbldl (mgr.14229) 157 : cephadm [INF] Checking dashboard <-> RGW credentials 2026-04-15T13:35:54.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: cephadm 2026-04-15T13:35:53.041865+0000 mgr.vm06.qbbldl (mgr.14229) 158 : cephadm [INF] Reconfiguring prometheus.vm06 deps ['8765', '9283', 'alertmanager', 'ceph-exporter.vm06', 'ceph-exporter.vm09', 'mgr.vm06.qbbldl', 'node-exporter', 'secure_monitoring_stack:False'] -> ['8765', '9283', 'alertmanager', 'ceph-exporter.vm06', 'ceph-exporter.vm09', 'ingress', 'mgr.vm06.qbbldl', 'node-exporter', 'secure_monitoring_stack:False'] (diff {'ingress'}) 2026-04-15T13:35:54.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: cephadm 2026-04-15T13:35:53.041865+0000 mgr.vm06.qbbldl (mgr.14229) 158 : cephadm [INF] Reconfiguring prometheus.vm06 deps 
['8765', '9283', 'alertmanager', 'ceph-exporter.vm06', 'ceph-exporter.vm09', 'mgr.vm06.qbbldl', 'node-exporter', 'secure_monitoring_stack:False'] -> ['8765', '9283', 'alertmanager', 'ceph-exporter.vm06', 'ceph-exporter.vm09', 'ingress', 'mgr.vm06.qbbldl', 'node-exporter', 'secure_monitoring_stack:False'] (diff {'ingress'}) 2026-04-15T13:35:54.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: cephadm 2026-04-15T13:35:53.194847+0000 mgr.vm06.qbbldl (mgr.14229) 159 : cephadm [INF] Reconfiguring daemon prometheus.vm06 on vm06 2026-04-15T13:35:54.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: cephadm 2026-04-15T13:35:53.194847+0000 mgr.vm06.qbbldl (mgr.14229) 159 : cephadm [INF] Reconfiguring daemon prometheus.vm06 on vm06 2026-04-15T13:35:54.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:53.393532+0000 mon.vm06 (mon.0) 844 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:35:54.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:53.393532+0000 mon.vm06 (mon.0) 844 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:53.481658+0000 mon.vm06 (mon.0) 845 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:53.481658+0000 mon.vm06 (mon.0) 845 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:53.508924+0000 mon.vm06 (mon.0) 846 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:53.508924+0000 mon.vm06 (mon.0) 846 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "orch get-security-config"} : dispatch 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:53.981330+0000 mon.vm06 (mon.0) 847 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:53.981330+0000 mon.vm06 (mon.0) 847 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:53.988474+0000 mon.vm06 (mon.0) 848 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:53.988474+0000 mon.vm06 (mon.0) 848 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' 
entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:53.995578+0000 mon.vm06 (mon.0) 849 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:53.995578+0000 mon.vm06 (mon.0) 849 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.002004+0000 mon.vm06 (mon.0) 850 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.002004+0000 mon.vm06 (mon.0) 850 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.008288+0000 mon.vm06 (mon.0) 851 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.008288+0000 mon.vm06 (mon.0) 851 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.014784+0000 mon.vm06 (mon.0) 852 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.014784+0000 mon.vm06 (mon.0) 852 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.026131+0000 mon.vm06 (mon.0) 853 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.026131+0000 mon.vm06 (mon.0) 853 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.038507+0000 mon.vm06 (mon.0) 854 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.038507+0000 mon.vm06 (mon.0) 854 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.045474+0000 mon.vm06 (mon.0) 855 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.045474+0000 mon.vm06 (mon.0) 855 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 
2026-04-15T13:35:54.052382+0000 mon.vm06 (mon.0) 856 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.052382+0000 mon.vm06 (mon.0) 856 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.055858+0000 mon.vm06 (mon.0) 857 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.055858+0000 mon.vm06 (mon.0) 857 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.087548+0000 mon.vm06 (mon.0) 858 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.087548+0000 mon.vm06 (mon.0) 858 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.093077+0000 mon.vm06 (mon.0) 859 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.093077+0000 mon.vm06 (mon.0) 859 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.094189+0000 mon.vm06 (mon.0) 860 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.094189+0000 mon.vm06 (mon.0) 860 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.094971+0000 mon.vm06 (mon.0) 861 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:54.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:54 vm09 bash[34466]: audit 2026-04-15T13:35:54.094971+0000 
mon.vm06 (mon.0) 861 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:35:54.873 INFO:teuthology.orchestra.run.vm06.stdout: 2026-04-15T13:35:54.873 INFO:teuthology.orchestra.run.vm06.stdout:[{"placement": {"count": 1}, "service_name": "alertmanager", "service_type": "alertmanager", "status": {"created": "2026-04-15T13:33:19.172876Z", "last_refresh": "2026-04-15T13:35:52.639216Z", "ports": [9093, 9094], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:34:16.043261Z service:ceph-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "ceph-exporter", "service_type": "ceph-exporter", "spec": {"prio_limit": 5, "stats_period": 5}, "status": {"created": "2026-04-15T13:33:17.411253Z", "last_refresh": "2026-04-15T13:35:52.083394Z", "ports": [9926], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:16.921485Z service:crash [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "crash", "service_type": "crash", "status": {"created": "2026-04-15T13:33:16.942536Z", "last_refresh": "2026-04-15T13:35:52.083175Z", "running": 2, "size": 2}}, {"placement": {"count": 1}, "service_name": "grafana", "service_type": "grafana", "spec": {"anonymous_access": true, "protocol": "https"}, "status": {"created": "2026-04-15T13:33:18.324266Z", "last_refresh": "2026-04-15T13:35:52.639156Z", "ports": [3000], "running": 1, "size": 1}}, {"events": ["2026-04-15T13:35:47.198372Z service:ingress.rgw.foo [INFO] \"service was created\""], "placement": {"count": 2}, "service_id": "rgw.foo", "service_name": "ingress.rgw.foo", "service_type": "ingress", "spec": {"backend_service": "rgw.foo", "first_virtual_router_id": 50, "frontend_port": 9000, "monitor_port": 9001, "virtual_ip": "12.12.1.106/22"}, "status": {"created": "2026-04-15T13:35:18.414551Z", "last_refresh": "2026-04-15T13:35:52.083240Z", "ports": [9000, 9001], "running": 4, "size": 4, "virtual_ip": "12.12.1.106/22"}}, {"events": ["2026-04-15T13:34:18.488893Z service:mgr [INFO] \"service was created\""], "placement": {"count": 2}, "service_name": "mgr", "service_type": "mgr", "status": {"created": "2026-04-15T13:33:16.441441Z", "last_refresh": "2026-04-15T13:35:52.083306Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:19.506293Z service:mon [INFO] \"service was created\""], "placement": {"count": 2, "hosts": ["vm06:192.168.123.106=vm06", "vm09:192.168.123.109=vm09"]}, "service_name": "mon", "service_type": "mon", "status": {"created": "2026-04-15T13:33:42.285419Z", "last_refresh": "2026-04-15T13:35:52.083423Z", "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:17.644752Z service:node-exporter [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_name": "node-exporter", "service_type": "node-exporter", "status": {"created": "2026-04-15T13:33:18.737078Z", "last_refresh": "2026-04-15T13:35:52.083208Z", "ports": [9100], "running": 2, "size": 2}}, {"events": ["2026-04-15T13:34:29.792623Z service:osd.all-available-devices [INFO] \"service was created\""], "placement": {"host_pattern": "*"}, "service_id": "all-available-devices", "service_name": "osd.all-available-devices", "service_type": "osd", "spec": {"data_devices": {"all": true}, "filter_logic": "AND", "objectstore": "bluestore"}, "status": {"created": "2026-04-15T13:34:29.789052Z", "last_refresh": "2026-04-15T13:35:52.083030Z", "running": 8, "size": 
8}}, {"events": ["2026-04-15T13:34:19.509162Z service:prometheus [INFO] \"service was created\""], "placement": {"count": 1}, "service_name": "prometheus", "service_type": "prometheus", "status": {"created": "2026-04-15T13:33:17.901263Z", "ports": [9095], "running": 0, "size": 1}}, {"events": ["2026-04-15T13:35:29.362346Z service:rgw.foo [INFO] \"service was created\""], "placement": {"count": 4, "host_pattern": "*"}, "service_id": "foo", "service_name": "rgw.foo", "service_type": "rgw", "spec": {"rgw_exit_timeout_secs": 120, "rgw_frontend_port": 8000}, "status": {"created": "2026-04-15T13:35:29.356602Z", "last_refresh": "2026-04-15T13:35:52.083087Z", "ports": [8000, 8001], "running": 4, "size": 4}}] 2026-04-15T13:35:54.959 INFO:tasks.cephadm:ingress.rgw.foo has 4/4 2026-04-15T13:35:54.959 INFO:teuthology.run_tasks:Running task cephadm.shell... 2026-04-15T13:35:54.962 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm06.local 2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- bash -c 'echo "Check while healthy..." 2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> curl http://12.12.1.106:9000/ 2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> 2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> # stop each rgw in turn 2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> echo "Check with each rgw stopped in turn..." 2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> for rgw in `ceph orch ps | grep ^rgw.foo. | awk '"'"'{print $1}'"'"'`; do 2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> ceph orch daemon stop $rgw 2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep stopped; do echo '"'"'Waiting for $rgw to stop'"'"'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done" 2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> timeout 300 bash -c "while ! curl http://12.12.1.106:9000/ ; do echo '"'"'Waiting for http://12.12.1.106:9000/ to be available'"'"'; sleep 1 ; done" 2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> ceph orch daemon start $rgw 2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> timeout 300 bash -c "while ! ceph orch ps | grep $rgw | grep running; do echo '"'"'Waiting for $rgw to start'"'"'; ceph orch ps --daemon-type rgw; ceph health detail; sleep 5 ; done" 2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> done 2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> 2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> # stop each haproxy in turn 2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> echo "Check with each haproxy down in turn..." 2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> for haproxy in `ceph orch ps | grep ^haproxy.rgw.foo. | awk '"'"'{print $1}'"'"'`; do 2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> ceph orch daemon stop $haproxy 2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> timeout 300 bash -c "while ! 
ceph orch ps | grep $haproxy | grep stopped; do echo '"'"'Waiting for $haproxy to stop'"'"'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done"
2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> timeout 300 bash -c "while ! curl http://12.12.1.106:9000/ ; do echo '"'"'Waiting for http://12.12.1.106:9000/ to be available'"'"'; sleep 1 ; done"
2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> ceph orch daemon start $haproxy
2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> timeout 300 bash -c "while ! ceph orch ps | grep $haproxy | grep running; do echo '"'"'Waiting for $haproxy to start'"'"'; ceph orch ps --daemon-type haproxy; ceph health detail; sleep 5 ; done"
2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:> done
2026-04-15T13:35:54.962 DEBUG:teuthology.orchestra.run.vm06:>
2026-04-15T13:35:54.963 DEBUG:teuthology.orchestra.run.vm06:> timeout 300 bash -c "while ! curl http://12.12.1.106:9000/ ; do echo '"'"'Waiting for http://12.12.1.106:9000/ to be available'"'"'; sleep 1 ; done"'
2026-04-15T13:35:55.255 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:35:55.337 INFO:teuthology.orchestra.run.vm06.stdout:Check while healthy...
2026-04-15T13:35:55.340 INFO:teuthology.orchestra.run.vm06.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-15T13:35:55.341 INFO:teuthology.orchestra.run.vm06.stderr: Dload Upload Total Spent Left Speed
2026-04-15T13:35:55.342 INFO:teuthology.orchestra.run.vm06.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k
2026-04-15T13:35:55.342 INFO:teuthology.orchestra.run.vm06.stdout:anonymousCheck with each rgw stopped in turn...
2026-04-15T13:35:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:55 vm06 bash[28114]: audit 2026-04-15T13:35:53.394155+0000 mgr.vm06.qbbldl (mgr.14229) 160 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch
2026-04-15T13:35:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:55 vm06 bash[28114]: audit 2026-04-15T13:35:53.509463+0000 mgr.vm06.qbbldl (mgr.14229) 161 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch
2026-04-15T13:35:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:55 vm06 bash[28114]: audit 2026-04-15T13:35:54.085363+0000 mgr.vm06.qbbldl (mgr.14229) 162 : audit [DBG] from='client.14738 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:55 vm06 bash[28114]: cluster 2026-04-15T13:35:54.179356+0000 mgr.vm06.qbbldl (mgr.14229) 163 : cluster [DBG] pgmap v74: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 5.0 KiB/s rd, 85 B/s wr, 10 op/s
2026-04-15T13:35:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:55 vm06 bash[28114]: audit 2026-04-15T13:35:54.869165+0000 mon.vm06 (mon.0) 862 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:55 vm06 bash[28114]: audit 2026-04-15T13:35:54.869862+0000 mon.vm06 (mon.0) 863 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:55 vm06 bash[28114]: audit 2026-04-15T13:35:54.870345+0000 mon.vm06 (mon.0) 864 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:55.501 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:55 vm06 bash[28114]: audit 2026-04-15T13:35:54.871277+0000 mon.vm06 (mon.0) 865 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:55 vm09 bash[34466]: audit 2026-04-15T13:35:53.394155+0000 mgr.vm06.qbbldl (mgr.14229) 160 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch
2026-04-15T13:35:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:55 vm09 bash[34466]: audit 2026-04-15T13:35:53.509463+0000 mgr.vm06.qbbldl (mgr.14229) 161 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "orch get-security-config"}]: dispatch
2026-04-15T13:35:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:55 vm09 bash[34466]: audit 2026-04-15T13:35:54.085363+0000 mgr.vm06.qbbldl (mgr.14229) 162 : audit [DBG] from='client.14738 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:55 vm09 bash[34466]: cluster 2026-04-15T13:35:54.179356+0000 mgr.vm06.qbbldl (mgr.14229) 163 : cluster [DBG] pgmap v74: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 5.0 KiB/s rd, 85 B/s wr, 10 op/s
2026-04-15T13:35:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:55 vm09 bash[34466]: audit 2026-04-15T13:35:54.869165+0000 mon.vm06 (mon.0) 862 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:55 vm09 bash[34466]: audit 2026-04-15T13:35:54.869862+0000 mon.vm06 (mon.0) 863 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:55 vm09 bash[34466]: audit 2026-04-15T13:35:54.870345+0000 mon.vm06 (mon.0) 864 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:55 vm09 bash[34466]: audit 2026-04-15T13:35:54.871277+0000 mon.vm06 (mon.0) 865 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:35:55.880 INFO:teuthology.orchestra.run.vm06.stdout:Scheduled to stop rgw.foo.vm06.landug on host 'vm06'
2026-04-15T13:35:56.093 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.landug to stop
2026-04-15T13:35:56.280 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:35:56.280 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (26s) 3s ago 26s 96.8M - 20.2.0-19-g7ec4401a095 b4cb326006c0 cf506a8c903c
2026-04-15T13:35:56.280 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (28s) 3s ago 28s 97.0M - 20.2.0-19-g7ec4401a095 b4cb326006c0 6a48b9c9f47e
2026-04-15T13:35:56.280 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (29s) 4s ago 29s 97.0M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:35:56.280 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (27s) 4s ago 27s 97.2M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:35:56.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:56 vm06 bash[28114]: audit 2026-04-15T13:35:54.868426+0000 mgr.vm06.qbbldl (mgr.14229) 164 : audit [DBG] from='client.24439 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:56.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:56 vm06 bash[28114]: audit 2026-04-15T13:35:55.875630+0000 mon.vm06 (mon.0) 866 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:56.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:56 vm06 bash[28114]: audit 2026-04-15T13:35:55.880824+0000 mon.vm06 (mon.0) 867 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:56.517 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK
2026-04-15T13:35:56.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:56 vm09 bash[34466]: audit 2026-04-15T13:35:54.868426+0000 mgr.vm06.qbbldl (mgr.14229) 164 : audit [DBG] from='client.24439 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""], "format": "json"}]: dispatch
2026-04-15T13:35:56.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:56 vm09 bash[34466]: audit 2026-04-15T13:35:55.875630+0000 mon.vm06 (mon.0) 866 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:56.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:56 vm09 bash[34466]: audit 2026-04-15T13:35:55.880824+0000 mon.vm06 (mon.0) 867 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:57.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:57 vm06 bash[28114]: audit 2026-04-15T13:35:55.655566+0000 mgr.vm06.qbbldl (mgr.14229) 165 : audit [DBG] from='client.14746 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:35:57.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:57 vm06
bash[28114]: audit 2026-04-15T13:35:55.868030+0000 mgr.vm06.qbbldl (mgr.14229) 166 : audit [DBG] from='client.14750 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm06.landug", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:57.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:57 vm06 bash[28114]: audit 2026-04-15T13:35:55.868030+0000 mgr.vm06.qbbldl (mgr.14229) 166 : audit [DBG] from='client.14750 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm06.landug", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:57.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:57 vm06 bash[28114]: cephadm 2026-04-15T13:35:55.868550+0000 mgr.vm06.qbbldl (mgr.14229) 167 : cephadm [INF] Schedule stop daemon rgw.foo.vm06.landug 2026-04-15T13:35:57.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:57 vm06 bash[28114]: cephadm 2026-04-15T13:35:55.868550+0000 mgr.vm06.qbbldl (mgr.14229) 167 : cephadm [INF] Schedule stop daemon rgw.foo.vm06.landug 2026-04-15T13:35:57.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:57 vm06 bash[28114]: audit 2026-04-15T13:35:56.077688+0000 mgr.vm06.qbbldl (mgr.14229) 168 : audit [DBG] from='client.14754 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:57.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:57 vm06 bash[28114]: audit 2026-04-15T13:35:56.077688+0000 mgr.vm06.qbbldl (mgr.14229) 168 : audit [DBG] from='client.14754 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:57.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:57 vm06 bash[28114]: cluster 2026-04-15T13:35:56.179857+0000 mgr.vm06.qbbldl (mgr.14229) 169 : cluster [DBG] pgmap v75: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 85 B/s wr, 19 op/s 2026-04-15T13:35:57.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:57 vm06 bash[28114]: cluster 2026-04-15T13:35:56.179857+0000 mgr.vm06.qbbldl (mgr.14229) 169 : cluster [DBG] pgmap v75: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 85 B/s wr, 19 op/s 2026-04-15T13:35:57.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:57 vm06 bash[28114]: audit 2026-04-15T13:35:56.278038+0000 mgr.vm06.qbbldl (mgr.14229) 170 : audit [DBG] from='client.14758 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:57.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:57 vm06 bash[28114]: audit 2026-04-15T13:35:56.278038+0000 mgr.vm06.qbbldl (mgr.14229) 170 : audit [DBG] from='client.14758 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:35:57.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:57 vm06 bash[28114]: audit 2026-04-15T13:35:56.518238+0000 mon.vm06 (mon.0) 868 : audit [DBG] from='client.? 192.168.123.106:0/2540734095' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:35:57.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:57 vm06 bash[28114]: audit 2026-04-15T13:35:56.518238+0000 mon.vm06 (mon.0) 868 : audit [DBG] from='client.? 
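The "Scheduled to stop ..." / repeated "Waiting for rgw.foo.vm06.landug to stop" exchange above is the harness polling the orchestrator: the stop is only queued (note the mgr's "Schedule stop daemon" record), so the caller must re-run "ceph orch ps" until the daemon's status actually changes. A minimal sketch of that polling pattern in Python, assuming "ceph orch ps --format json" is reachable from the host; the daemon_type/daemon_id/status_desc field names are assumptions about the cephadm JSON, not taken from this log:

    import json
    import subprocess
    import time

    def wait_for_status(daemon_name, want, timeout=300.0):
        # Poll the orchestrator until daemon_name reports the wanted status
        # (e.g. "stopped"), mirroring the wait loop visible in this log.
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            out = subprocess.run(
                ["ceph", "orch", "ps", "--format", "json"],
                check=True, capture_output=True, text=True,
            ).stdout
            for d in json.loads(out):
                # "rgw" + "foo.vm06.landug" -> "rgw.foo.vm06.landug";
                # field names are assumptions about the cephadm JSON.
                name = "%s.%s" % (d.get("daemon_type"), d.get("daemon_id"))
                if name == daemon_name and want in d.get("status_desc", ""):
                    return
            print("Waiting for %s to reach '%s'" % (daemon_name, want))
            time.sleep(5)
        raise TimeoutError("%s never reached '%s'" % (daemon_name, want))

    # wait_for_status("rgw.foo.vm06.landug", "stopped")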
2026-04-15T13:35:57.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:57 vm09 bash[34466]: audit 2026-04-15T13:35:55.655566+0000 mgr.vm06.qbbldl (mgr.14229) 165 : audit [DBG] from='client.14746 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:35:57.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:57 vm09 bash[34466]: audit 2026-04-15T13:35:55.868030+0000 mgr.vm06.qbbldl (mgr.14229) 166 : audit [DBG] from='client.14750 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm06.landug", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:35:57.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:57 vm09 bash[34466]: cephadm 2026-04-15T13:35:55.868550+0000 mgr.vm06.qbbldl (mgr.14229) 167 : cephadm [INF] Schedule stop daemon rgw.foo.vm06.landug
2026-04-15T13:35:57.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:57 vm09 bash[34466]: audit 2026-04-15T13:35:56.077688+0000 mgr.vm06.qbbldl (mgr.14229) 168 : audit [DBG] from='client.14754 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:35:57.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:57 vm09 bash[34466]: cluster 2026-04-15T13:35:56.179857+0000 mgr.vm06.qbbldl (mgr.14229) 169 : cluster [DBG] pgmap v75: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 85 B/s wr, 19 op/s
2026-04-15T13:35:57.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:57 vm09 bash[34466]: audit 2026-04-15T13:35:56.278038+0000 mgr.vm06.qbbldl (mgr.14229) 170 : audit [DBG] from='client.14758 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:35:57.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:57 vm09 bash[34466]: audit 2026-04-15T13:35:56.518238+0000 mon.vm06 (mon.0) 868 : audit [DBG] from='client.? 192.168.123.106:0/2540734095' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:35:59.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:59 vm09 bash[34466]: cluster 2026-04-15T13:35:58.180273+0000 mgr.vm06.qbbldl (mgr.14229) 171 : cluster [DBG] pgmap v76: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 170 B/s wr, 23 op/s
2026-04-15T13:35:59.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:59 vm09 bash[34466]: audit 2026-04-15T13:35:58.949233+0000 mon.vm06 (mon.0) 869 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:59.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:35:59 vm09 bash[34466]: audit 2026-04-15T13:35:58.953873+0000 mon.vm06 (mon.0) 870 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:59.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:59 vm06 bash[28114]: cluster 2026-04-15T13:35:58.180273+0000 mgr.vm06.qbbldl (mgr.14229) 171 : cluster [DBG] pgmap v76: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 170 B/s wr, 23 op/s
2026-04-15T13:35:59.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:59 vm06 bash[28114]: audit 2026-04-15T13:35:58.949233+0000 mon.vm06 (mon.0) 869 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:35:59.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:35:59 vm06 bash[28114]: audit 2026-04-15T13:35:58.953873+0000 mon.vm06 (mon.0) 870 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:00.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:00 vm09 bash[34466]: audit 2026-04-15T13:35:59.516963+0000 mon.vm06 (mon.0) 871 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:00.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:00 vm09 bash[34466]: audit 2026-04-15T13:35:59.523345+0000 mon.vm06 (mon.0) 872 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:00.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:00 vm09 bash[34466]: audit 2026-04-15T13:35:59.524472+0000 mon.vm06 (mon.0) 873 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:36:00.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:00 vm09 bash[34466]: audit 2026-04-15T13:35:59.525104+0000 mon.vm06 (mon.0) 874 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:36:00.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:00 vm09 bash[34466]: cephadm 2026-04-15T13:35:59.528371+0000 mgr.vm06.qbbldl (mgr.14229) 172 : cephadm [INF] Checking dashboard <-> RGW credentials
2026-04-15T13:36:00.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:00 vm09 bash[34466]: audit 2026-04-15T13:35:59.734451+0000 mon.vm06 (mon.0) 875 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:00.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:00 vm09 bash[34466]: audit 2026-04-15T13:35:59.737128+0000 mon.vm06 (mon.0) 876 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:36:00.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:00 vm09 bash[34466]: cluster 2026-04-15T13:36:00.180863+0000 mgr.vm06.qbbldl (mgr.14229) 173 : cluster [DBG] pgmap v77: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 170 B/s wr, 28 op/s
2026-04-15T13:36:01.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:00 vm06 bash[28114]: audit 2026-04-15T13:35:59.516963+0000 mon.vm06 (mon.0) 871 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:01.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:00 vm06 bash[28114]: audit 2026-04-15T13:35:59.523345+0000 mon.vm06 (mon.0) 872 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:01.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:00 vm06 bash[28114]: audit 2026-04-15T13:35:59.524472+0000 mon.vm06 (mon.0) 873 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:36:01.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:00 vm06 bash[28114]: audit 2026-04-15T13:35:59.525104+0000 mon.vm06 (mon.0) 874 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:36:01.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:00 vm06 bash[28114]: cephadm 2026-04-15T13:35:59.528371+0000 mgr.vm06.qbbldl (mgr.14229) 172 : cephadm [INF] Checking dashboard <-> RGW credentials
2026-04-15T13:36:01.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:00 vm06 bash[28114]: audit 2026-04-15T13:35:59.734451+0000 mon.vm06 (mon.0) 875 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:01.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:00 vm06 bash[28114]: audit 2026-04-15T13:35:59.737128+0000 mon.vm06 (mon.0) 876 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:36:01.012 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:00 vm06 bash[28114]: cluster 2026-04-15T13:36:00.180863+0000 mgr.vm06.qbbldl (mgr.14229) 173 : cluster [DBG] pgmap v77: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 16 KiB/s rd, 170 B/s wr, 28 op/s
2026-04-15T13:36:01.735 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.landug to stop
2026-04-15T13:36:01.924 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:36:01.924 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (32s) 2s ago 32s 97.2M - 20.2.0-19-g7ec4401a095 b4cb326006c0 cf506a8c903c
2026-04-15T13:36:01.924 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (34s) 2s ago 34s 97.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 6a48b9c9f47e
2026-04-15T13:36:01.924 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (35s) 2s ago 35s 97.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:36:01.924 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (33s) 2s ago 33s 97.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:36:02.157 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK
2026-04-15T13:36:02.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:02 vm06 bash[28114]: audit 2026-04-15T13:36:02.158284+0000 mon.vm06 (mon.0) 877 : audit [DBG] from='client.? 192.168.123.106:0/3257167995' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
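Every audit record interleaved here has one shape: a cluster-log timestamp, the reporting daemon and rank (mon.vm06 (mon.0), mgr.vm06.qbbldl (mgr.14229)), a sequence number, a severity tag, from=/entity= fields and, for dispatched commands, a cmd payload that is itself JSON. A hypothetical helper for slicing those fields out of a captured log; the regex is fitted to the lines above and is not part of teuthology or Ceph:

    import json
    import re

    AUDIT_RE = re.compile(
        r"audit (?P<stamp>\S+) (?P<daemon>\S+ \(\S+\)) (?P<seq>\d+) : audit "
        r"\[(?P<level>\w+)\] from='(?P<from>[^']*)' entity='(?P<entity>[^']*)'"
        r"(?: cmd=(?P<cmd>.*?) ?: dispatch)?"
    )

    def parse_audit(line):
        m = AUDIT_RE.search(line)
        return m.groupdict() if m else None

    rec = parse_audit(
        "audit 2026-04-15T13:35:56.518238+0000 mon.vm06 (mon.0) 868 : audit [DBG]"
        " from='client.? 192.168.123.106:0/2540734095' entity='client.admin'"
        " cmd={\"prefix\": \"health\", \"detail\": \"detail\"} : dispatch"
    )
    assert json.loads(rec["cmd"])["prefix"] == "health"

The cmd group comes back as a JSON string ({...} with " : dispatch" on mon audits, [{...}] with "]: dispatch" on mgr audits, which is why the space before the colon is optional in the regex), so it can be fed straight to json.loads for filtering by prefix.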
2026-04-15T13:36:02.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:02 vm09 bash[34466]: audit 2026-04-15T13:36:02.158284+0000 mon.vm06 (mon.0) 877 : audit [DBG] from='client.? 192.168.123.106:0/3257167995' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:36:03.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:03 vm06 bash[28114]: audit 2026-04-15T13:36:01.715561+0000 mgr.vm06.qbbldl (mgr.14229) 174 : audit [DBG] from='client.14788 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:03.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:03 vm06 bash[28114]: audit 2026-04-15T13:36:01.921765+0000 mgr.vm06.qbbldl (mgr.14229) 175 : audit [DBG] from='client.14792 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:03.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:03 vm06 bash[28114]: cluster 2026-04-15T13:36:02.181243+0000 mgr.vm06.qbbldl (mgr.14229) 176 : cluster [DBG] pgmap v78: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 170 B/s wr, 30 op/s
2026-04-15T13:36:03.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:03 vm09 bash[34466]: audit 2026-04-15T13:36:01.715561+0000 mgr.vm06.qbbldl (mgr.14229) 174 : audit [DBG] from='client.14788 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:03.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:03 vm09 bash[34466]: audit 2026-04-15T13:36:01.921765+0000 mgr.vm06.qbbldl (mgr.14229) 175 : audit [DBG] from='client.14792 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:03.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:03 vm09 bash[34466]: cluster 2026-04-15T13:36:02.181243+0000 mgr.vm06.qbbldl (mgr.14229) 176 : cluster [DBG] pgmap v78: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 170 B/s wr, 30 op/s
2026-04-15T13:36:05.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:05 vm06 bash[28114]: cluster 2026-04-15T13:36:04.181613+0000 mgr.vm06.qbbldl (mgr.14229) 177 : cluster [DBG] pgmap v79: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 341 B/s wr, 35 op/s
2026-04-15T13:36:05.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:05 vm09 bash[34466]: cluster 2026-04-15T13:36:04.181613+0000 mgr.vm06.qbbldl (mgr.14229) 177 : cluster [DBG] pgmap v79: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 20 KiB/s rd, 341 B/s wr, 35 op/s
2026-04-15T13:36:07.383 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.landug to stop
2026-04-15T13:36:07.577 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:36:07.577 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (38s) 8s ago 38s 97.2M - 20.2.0-19-g7ec4401a095 b4cb326006c0 cf506a8c903c
2026-04-15T13:36:07.577 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (40s) 8s ago 40s 97.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 6a48b9c9f47e
2026-04-15T13:36:07.577 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (41s) 8s ago 41s 97.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:36:07.577 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (39s) 8s ago 39s 97.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:36:07.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:07 vm09 bash[34466]: cluster 2026-04-15T13:36:06.182027+0000 mgr.vm06.qbbldl (mgr.14229) 178 : cluster [DBG] pgmap v80: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 255 B/s wr, 25 op/s
2026-04-15T13:36:07.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:07 vm09 bash[34466]: audit 2026-04-15T13:36:06.347544+0000 mon.vm06 (mon.0) 878 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:07.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:07 vm09 bash[34466]: audit 2026-04-15T13:36:06.353195+0000 mon.vm06 (mon.0) 879 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:07.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:07 vm09 bash[34466]: audit 2026-04-15T13:36:06.359461+0000 mon.vm06 (mon.0) 880 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:07.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:07 vm09 bash[34466]: audit 2026-04-15T13:36:06.364738+0000 mon.vm06 (mon.0) 881 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:07.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:07 vm09 bash[34466]: audit 2026-04-15T13:36:06.367949+0000 mon.vm06 (mon.0) 882 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard get-prometheus-api-host"} : dispatch
2026-04-15T13:36:07.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:07 vm09 bash[34466]: audit 2026-04-15T13:36:06.368269+0000 mgr.vm06.qbbldl (mgr.14229) 179 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-04-15T13:36:07.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:07 vm09 bash[34466]: audit 2026-04-15T13:36:06.369284+0000 mon.vm06 (mon.0) 883 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:36:07.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:07 vm06 bash[28114]: cluster 2026-04-15T13:36:06.182027+0000 mgr.vm06.qbbldl (mgr.14229) 178 : cluster [DBG] pgmap v80: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 15 KiB/s rd, 255 B/s wr, 25 op/s
2026-04-15T13:36:07.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:07 vm06 bash[28114]: audit 2026-04-15T13:36:06.347544+0000 mon.vm06 (mon.0) 878 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:07.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:07 vm06 bash[28114]: audit 2026-04-15T13:36:06.353195+0000 mon.vm06 (mon.0) 879 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:07.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:07 vm06 bash[28114]: audit 2026-04-15T13:36:06.359461+0000 mon.vm06 (mon.0) 880 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:07.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:07 vm06 bash[28114]: audit 2026-04-15T13:36:06.364738+0000 mon.vm06 (mon.0) 881 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:07.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:07 vm06 bash[28114]: audit 2026-04-15T13:36:06.367949+0000 mon.vm06 (mon.0) 882 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "dashboard get-prometheus-api-host"} : dispatch
2026-04-15T13:36:07.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:07 vm06 bash[28114]: audit 2026-04-15T13:36:06.368269+0000 mgr.vm06.qbbldl (mgr.14229) 179 : audit [DBG] from='mon.0 -' entity='mon.' cmd=[{"prefix": "dashboard get-prometheus-api-host"}]: dispatch
2026-04-15T13:36:07.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:07 vm06 bash[28114]: audit 2026-04-15T13:36:06.369284+0000 mon.vm06 (mon.0) 883 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:36:07.838 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK
2026-04-15T13:36:08.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:08 vm09 bash[34466]: audit 2026-04-15T13:36:07.362325+0000 mgr.vm06.qbbldl (mgr.14229) 180 : audit [DBG] from='client.14800 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:08.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:08 vm09 bash[34466]: audit 2026-04-15T13:36:07.839114+0000 mon.vm06 (mon.0) 884 : audit [DBG] from='client.? 192.168.123.106:0/2659865163' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:36:08.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:08 vm09 bash[34466]: audit 2026-04-15T13:36:08.178559+0000 mon.vm06 (mon.0) 885 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd pg-upmap-items", "format": "json", "pgid": "5.19", "id": [4, 6]} : dispatch
2026-04-15T13:36:08.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:08 vm06 bash[28114]: audit 2026-04-15T13:36:07.362325+0000 mgr.vm06.qbbldl (mgr.14229) 180 : audit [DBG] from='client.14800 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:08.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:08 vm06 bash[28114]: audit 2026-04-15T13:36:07.839114+0000 mon.vm06 (mon.0) 884 : audit [DBG] from='client.? 192.168.123.106:0/2659865163' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
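The osd pg-upmap-items dispatch at mon seq 885 is the mgr balancer installing an explicit placement exception in the osdmap rather than anything the test requested: the id array is consumed as (from, to) OSD pairs, so [4, 6] remaps PG 5.19 from osd.4 to osd.6, and the matching "finished" record plus the osdmap epoch bumps (e34, e35) follow below. A small sketch decoding that payload, assuming only the cmd JSON shown above:

    import json

    # cmd payload from the audit record above (mon.vm06 seq 885)
    cmd = json.loads(
        '{"prefix": "osd pg-upmap-items", "format": "json",'
        ' "pgid": "5.19", "id": [4, 6]}'
    )
    ids = cmd["id"]
    # consecutive entries pair up as (replace-this-osd, with-this-osd)
    pairs = list(zip(ids[::2], ids[1::2]))
    print("pg %s: %s" % (cmd["pgid"],
          ", ".join("osd.%d -> osd.%d" % (a, b) for a, b in pairs)))
    # -> pg 5.19: osd.4 -> osd.6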
2026-04-15T13:36:08.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:08 vm06 bash[28114]: audit 2026-04-15T13:36:08.178559+0000 mon.vm06 (mon.0) 885 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd pg-upmap-items", "format": "json", "pgid": "5.19", "id": [4, 6]} : dispatch
2026-04-15T13:36:09.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:09 vm06 bash[28114]: audit 2026-04-15T13:36:07.574905+0000 mgr.vm06.qbbldl (mgr.14229) 181 : audit [DBG] from='client.14804 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:09.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:09 vm06 bash[28114]: cluster 2026-04-15T13:36:08.182428+0000 mgr.vm06.qbbldl (mgr.14229) 182 : cluster [DBG] pgmap v81: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.9 KiB/s rd, 255 B/s wr, 16 op/s
2026-04-15T13:36:09.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:09 vm06 bash[28114]: audit 2026-04-15T13:36:08.357698+0000 mon.vm06 (mon.0) 886 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "5.19", "id": [4, 6]}]': finished
2026-04-15T13:36:09.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:09 vm06 bash[28114]: cluster 2026-04-15T13:36:08.359266+0000 mon.vm06 (mon.0) 887 : cluster [DBG] osdmap e34: 8 total, 8 up, 8 in
2026-04-15T13:36:09.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:09 vm06 bash[28114]: audit 2026-04-15T13:36:08.481854+0000 mon.vm06 (mon.0) 888 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:36:09.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:09 vm09 bash[34466]: audit 2026-04-15T13:36:07.574905+0000 mgr.vm06.qbbldl (mgr.14229) 181 : audit [DBG] from='client.14804 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:09.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:09 vm09 bash[34466]: cluster 2026-04-15T13:36:08.182428+0000 mgr.vm06.qbbldl (mgr.14229) 182 : cluster [DBG] pgmap v81: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.9 KiB/s rd, 255 B/s wr, 16 op/s
2026-04-15T13:36:09.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:09 vm09 bash[34466]: audit 2026-04-15T13:36:08.357698+0000 mon.vm06 (mon.0) 886 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd='[{"prefix": "osd pg-upmap-items", "format": "json", "pgid": "5.19", "id": [4, 6]}]': finished
2026-04-15T13:36:09.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:09 vm09 bash[34466]: cluster 2026-04-15T13:36:08.359266+0000 mon.vm06 (mon.0) 887 : cluster [DBG] osdmap e34: 8 total, 8 up, 8 in
2026-04-15T13:36:09.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:09 vm09 bash[34466]: audit 2026-04-15T13:36:08.481854+0000 mon.vm06 (mon.0) 888 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:36:10.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:10 vm06 bash[28114]: cluster 2026-04-15T13:36:09.377071+0000 mon.vm06 (mon.0) 889 : cluster [DBG] osdmap e35: 8 total, 8 up, 8 in
2026-04-15T13:36:10.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:10 vm06 bash[28114]: cluster 2026-04-15T13:36:10.182919+0000 mgr.vm06.qbbldl (mgr.14229) 183 : cluster [DBG] pgmap v84: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 4.6 KiB/s rd, 1023 B/s wr, 9 op/s
2026-04-15T13:36:10.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:10 vm09 bash[34466]: cluster 2026-04-15T13:36:09.377071+0000 mon.vm06 (mon.0) 889 : cluster [DBG] osdmap e35: 8 total, 8 up, 8 in
2026-04-15T13:36:10.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:10 vm09 bash[34466]: cluster 2026-04-15T13:36:10.182919+0000 mgr.vm06.qbbldl (mgr.14229) 183 : cluster [DBG] pgmap v84: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 4.6 KiB/s rd, 1023 B/s wr, 9 op/s
2026-04-15T13:36:12.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:12 vm06 bash[28114]: audit 2026-04-15T13:36:11.209399+0000 mon.vm06 (mon.0) 890 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:12.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:12 vm06 bash[28114]: audit 2026-04-15T13:36:11.215177+0000 mon.vm06 (mon.0) 891 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:12.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:12 vm06 bash[28114]: audit 2026-04-15T13:36:11.824303+0000 mon.vm06 (mon.0) 892 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:12.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:12 vm06 bash[28114]: audit 2026-04-15T13:36:11.831520+0000 mon.vm06 (mon.0) 893 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:12.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:12 vm06 bash[28114]: audit 2026-04-15T13:36:11.833131+0000 mon.vm06 (mon.0) 894 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:36:12.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:12 vm06 bash[28114]: audit 2026-04-15T13:36:11.833822+0000 mon.vm06 (mon.0) 895 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:36:12.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:12 vm06 bash[28114]: audit 2026-04-15T13:36:11.838529+0000 mon.vm06 (mon.0) 896 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:12.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:12 vm06 bash[28114]: audit 2026-04-15T13:36:11.839884+0000 mon.vm06 (mon.0) 897 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:36:12.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:12 vm09 bash[34466]: audit 2026-04-15T13:36:11.209399+0000 mon.vm06 (mon.0) 890 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:12.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:12 vm09 bash[34466]: audit 2026-04-15T13:36:11.215177+0000 mon.vm06 (mon.0) 891 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:12.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:12 vm09 bash[34466]: audit 2026-04-15T13:36:11.824303+0000 mon.vm06 (mon.0) 892 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:12.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:12 vm09 bash[34466]: audit 2026-04-15T13:36:11.831520+0000 mon.vm06 (mon.0) 893 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:12.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:12 vm09 bash[34466]: audit 2026-04-15T13:36:11.833131+0000 mon.vm06 (mon.0) 894 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:36:12.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:12 vm09 bash[34466]: audit 2026-04-15T13:36:11.833822+0000 mon.vm06 (mon.0) 895 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:36:12.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:12 vm09 bash[34466]: audit 2026-04-15T13:36:11.838529+0000 mon.vm06 (mon.0) 896 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:12.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:12 vm09 bash[34466]: audit 2026-04-15T13:36:11.839884+0000 mon.vm06 (mon.0) 897 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:36:13.047 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 stopped 1s ago 43s - -
2026-04-15T13:36:13.052 INFO:teuthology.orchestra.run.vm06.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-15T13:36:13.052 INFO:teuthology.orchestra.run.vm06.stderr: Dload Upload Total Spent Left Speed
2026-04-15T13:36:13.053 INFO:teuthology.orchestra.run.vm06.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k
2026-04-15T13:36:13.255 INFO:teuthology.orchestra.run.vm06.stdout:anonymousScheduled to start rgw.foo.vm06.landug on host 'vm06'
2026-04-15T13:36:13.491 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.landug to start
2026-04-15T13:36:13.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:13 vm06 bash[28114]: cluster 2026-04-15T13:36:12.183332+0000 mgr.vm06.qbbldl (mgr.14229) 184 : cluster [DBG] pgmap v85: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 383 B/s rd, 767 B/s wr, 1 op/s
2026-04-15T13:36:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:13 vm09 bash[34466]: cluster 2026-04-15T13:36:12.183332+0000 mgr.vm06.qbbldl (mgr.14229) 184 : cluster [DBG] pgmap v85: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 383 B/s rd, 767 B/s wr, 1 op/s
2026-04-15T13:36:13.703 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:36:13.703 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 stopped 1s ago 44s - -
2026-04-15T13:36:13.703 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (46s) 1s ago 46s 98.4M - 20.2.0-19-g7ec4401a095 b4cb326006c0 6a48b9c9f47e
2026-04-15T13:36:13.703 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (47s) 2s ago 47s 98.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:36:13.703 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (45s) 2s ago 45s 98.7M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:36:13.945 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK
2026-04-15T13:36:14.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:14 vm06 bash[28114]: audit 2026-04-15T13:36:13.031678+0000 mgr.vm06.qbbldl (mgr.14229) 185 : audit [DBG] from='client.24481 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:14.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:14 vm06 bash[28114]: audit 2026-04-15T13:36:13.241659+0000 mgr.vm06.qbbldl (mgr.14229) 186 : audit [DBG] from='client.14816 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm06.landug", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:14.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:14 vm06 bash[28114]: cephadm 2026-04-15T13:36:13.242141+0000 mgr.vm06.qbbldl (mgr.14229) 187 : cephadm [INF] Schedule start daemon rgw.foo.vm06.landug
2026-04-15T13:36:14.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:14 vm06 bash[28114]: audit 2026-04-15T13:36:13.248400+0000 mon.vm06 (mon.0) 898 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:14.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:14 vm06 bash[28114]: audit 2026-04-15T13:36:13.254985+0000 mon.vm06 (mon.0) 899 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:14.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:14 vm06 bash[28114]: audit 2026-04-15T13:36:13.255954+0000 mon.vm06 (mon.0) 900 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:36:14.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:14 vm06 bash[28114]: audit 2026-04-15T13:36:13.945979+0000 mon.vm06 (mon.0) 901 : audit [DBG] from='client.? 192.168.123.106:0/4079732122' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:36:14.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:14 vm09 bash[34466]: audit 2026-04-15T13:36:13.031678+0000 mgr.vm06.qbbldl (mgr.14229) 185 : audit [DBG] from='client.24481 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:14.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:14 vm09 bash[34466]: audit 2026-04-15T13:36:13.241659+0000 mgr.vm06.qbbldl (mgr.14229) 186 : audit [DBG] from='client.14816 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm06.landug", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:14.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:14 vm09 bash[34466]: cephadm 2026-04-15T13:36:13.242141+0000 mgr.vm06.qbbldl (mgr.14229) 187 : cephadm [INF] Schedule start daemon rgw.foo.vm06.landug
2026-04-15T13:36:14.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:14 vm09 bash[34466]: audit 2026-04-15T13:36:13.248400+0000 mon.vm06 (mon.0) 898 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:14.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:14 vm09 bash[34466]: audit 2026-04-15T13:36:13.254985+0000 mon.vm06 (mon.0) 899 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:14.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:14 vm09 bash[34466]: audit 2026-04-15T13:36:13.255954+0000 mon.vm06 (mon.0) 900 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:36:14.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:14 vm09 bash[34466]: audit 2026-04-15T13:36:13.945979+0000 mon.vm06 (mon.0) 901 : audit [DBG] from='client.? 192.168.123.106:0/4079732122' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:36:15.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:15 vm09 bash[34466]: audit 2026-04-15T13:36:13.471012+0000 mgr.vm06.qbbldl (mgr.14229) 188 : audit [DBG] from='client.14820 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:15.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:15 vm09 bash[34466]: audit 2026-04-15T13:36:13.701158+0000 mgr.vm06.qbbldl (mgr.14229) 189 : audit [DBG] from='client.14824 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:15.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:15 vm09 bash[34466]: cluster 2026-04-15T13:36:14.183698+0000 mgr.vm06.qbbldl (mgr.14229) 190 : cluster [DBG] pgmap v86: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 383 B/s rd, 767 B/s wr, 1 op/s
2026-04-15T13:36:15.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:15 vm06 bash[28114]: audit 2026-04-15T13:36:13.471012+0000 mgr.vm06.qbbldl (mgr.14229) 188 : audit [DBG] from='client.14820 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:15.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:15 vm06 bash[28114]: audit 2026-04-15T13:36:13.701158+0000 mgr.vm06.qbbldl (mgr.14229) 189 : audit [DBG] from='client.14824 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:15.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:15 vm06 bash[28114]: cluster 2026-04-15T13:36:14.183698+0000 mgr.vm06.qbbldl (mgr.14229) 190 : cluster [DBG] pgmap v86: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 383 B/s rd, 767 B/s wr, 1 op/s
2026-04-15T13:36:17.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:17 vm09 bash[34466]: cluster 2026-04-15T13:36:16.184127+0000 mgr.vm06.qbbldl (mgr.14229) 191 : cluster [DBG] pgmap v87: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 383 B/s rd, 767 B/s wr, 1 op/s
2026-04-15T13:36:17.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:17 vm06 bash[28114]: cluster 2026-04-15T13:36:16.184127+0000 mgr.vm06.qbbldl (mgr.14229) 191 : cluster [DBG] pgmap v87: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 383 B/s rd, 767 B/s wr, 1 op/s
2026-04-15T13:36:19.134 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:19 vm06 bash[28114]: audit 2026-04-15T13:36:18.124762+0000 mon.vm06 (mon.0) 902 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:19.134 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:19 vm06 bash[28114]: audit 2026-04-15T13:36:18.130704+0000 mon.vm06 (mon.0) 903 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:19.134 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:19 vm06 bash[28114]: cluster 2026-04-15T13:36:18.184624+0000 mgr.vm06.qbbldl (mgr.14229) 192 : cluster [DBG] pgmap v88: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 312 B/s rd, 625 B/s wr, 0 op/s
2026-04-15T13:36:19.134 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:19 vm06 bash[28114]: audit 2026-04-15T13:36:18.714562+0000 mon.vm06 (mon.0) 904 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:19.135 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:19 vm06 bash[28114]: audit 2026-04-15T13:36:18.732459+0000 mon.vm06 (mon.0) 905 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:19.135 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:19 vm06 bash[28114]: audit 2026-04-15T13:36:18.733860+0000 mon.vm06 (mon.0) 906 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:36:19.135 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:19 vm06 bash[28114]: audit 2026-04-15T13:36:18.734490+0000 mon.vm06 (mon.0) 907 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:36:19.135 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:19 vm06 bash[28114]: audit 2026-04-15T13:36:18.743556+0000 mon.vm06 (mon.0) 908 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:19.135 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:19 vm06 bash[28114]: audit 2026-04-15T13:36:18.745444+0000 mon.vm06 (mon.0) 909 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:36:19.180 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.landug to start
2026-04-15T13:36:19.382 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:36:19.383 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 stopped 0s ago 50s - -
2026-04-15T13:36:19.383 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (52s) 0s ago 52s 99.0M - 20.2.0-19-g7ec4401a095 b4cb326006c0 6a48b9c9f47e
2026-04-15T13:36:19.383 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (53s) 1s ago 53s 99.2M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:36:19.383 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (51s) 1s ago 51s 99.2M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:36:19.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:19 vm09 bash[34466]: audit 2026-04-15T13:36:18.124762+0000 mon.vm06 (mon.0) 902 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:19.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:19 vm09 bash[34466]: audit 2026-04-15T13:36:18.130704+0000 mon.vm06 (mon.0) 903 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:19.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:19 vm09 bash[34466]: cluster 2026-04-15T13:36:18.184624+0000 mgr.vm06.qbbldl (mgr.14229) 192 : cluster [DBG] pgmap v88: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 312 B/s rd, 625 B/s wr, 0 op/s
2026-04-15T13:36:19.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:19 vm09 bash[34466]: audit 2026-04-15T13:36:18.714562+0000 mon.vm06 (mon.0) 904 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:19.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:19 vm09 bash[34466]: audit 2026-04-15T13:36:18.732459+0000 mon.vm06 (mon.0) 905 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:19.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:19 vm09 bash[34466]: audit 2026-04-15T13:36:18.733860+0000 mon.vm06 (mon.0) 906 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:36:19.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:19 vm09 bash[34466]: audit 2026-04-15T13:36:18.734490+0000 mon.vm06 (mon.0) 907 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:36:19.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:19 vm09 bash[34466]: audit 2026-04-15T13:36:18.743556+0000 mon.vm06 (mon.0) 908 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:19.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:19 vm09 bash[34466]: audit 2026-04-15T13:36:18.745444+0000 mon.vm06 (mon.0) 909 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:36:19.689 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK
2026-04-15T13:36:20.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:20 vm06 bash[28114]: audit 2026-04-15T13:36:19.158711+0000 mgr.vm06.qbbldl (mgr.14229) 193 : audit [DBG] from='client.14832 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:20.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:20 vm06 bash[28114]: audit 2026-04-15T13:36:19.441691+0000 mon.vm06 (mon.0) 910 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:20.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:20 vm06 bash[28114]: audit 2026-04-15T13:36:19.448908+0000 mon.vm06 (mon.0) 911 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:20.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:20 vm06 bash[28114]: audit 2026-04-15T13:36:19.452466+0000 mon.vm06 (mon.0) 912 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:36:20.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:20 vm06 bash[28114]: audit 2026-04-15T13:36:19.689679+0000 mon.vm06 (mon.0) 913 : audit [DBG] from='client.? 192.168.123.106:0/3042279607' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:36:20.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:20 vm09 bash[34466]: audit 2026-04-15T13:36:19.158711+0000 mgr.vm06.qbbldl (mgr.14229) 193 : audit [DBG] from='client.14832 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:20.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:20 vm09 bash[34466]: audit 2026-04-15T13:36:19.441691+0000 mon.vm06 (mon.0) 910 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:20.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:20 vm09 bash[34466]: audit 2026-04-15T13:36:19.448908+0000 mon.vm06 (mon.0) 911 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:20.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:20 vm09 bash[34466]: audit 2026-04-15T13:36:19.452466+0000 mon.vm06 (mon.0) 912 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:36:20.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:20 vm09 bash[34466]: audit 2026-04-15T13:36:19.689679+0000 mon.vm06 (mon.0) 913 : audit [DBG] from='client.? 192.168.123.106:0/3042279607' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:36:21.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:21 vm06 bash[28114]: audit 2026-04-15T13:36:19.379081+0000 mgr.vm06.qbbldl (mgr.14229) 194 : audit [DBG] from='client.14836 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:21.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:21 vm06 bash[28114]: cluster 2026-04-15T13:36:20.185160+0000 mgr.vm06.qbbldl (mgr.14229) 195 : cluster [DBG] pgmap v89: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 0 B/s wr, 30 op/s
2026-04-15T13:36:21.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:21 vm09 bash[34466]: audit 2026-04-15T13:36:19.379081+0000 mgr.vm06.qbbldl (mgr.14229) 194 : audit [DBG] from='client.14836 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:21.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:21 vm09 bash[34466]: cluster 2026-04-15T13:36:20.185160+0000 mgr.vm06.qbbldl (mgr.14229) 195 : cluster [DBG] pgmap v89: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 0 B/s wr, 30 op/s
2026-04-15T13:36:23.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:23 vm06 bash[28114]: cluster 2026-04-15T13:36:22.185556+0000 mgr.vm06.qbbldl (mgr.14229) 196 : cluster [DBG] pgmap v90: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 0 B/s wr, 45 op/s
2026-04-15T13:36:23.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:23 vm09 bash[34466]: cluster 2026-04-15T13:36:22.185556+0000 mgr.vm06.qbbldl (mgr.14229) 196 : cluster [DBG] pgmap v90: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 0 B/s wr, 45 op/s
2026-04-15T13:36:24.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:24 vm06 bash[28114]: audit 2026-04-15T13:36:23.481970+0000 mon.vm06 (mon.0) 914 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:36:24.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:24 vm09 bash[34466]: audit 2026-04-15T13:36:23.481970+0000 mon.vm06 (mon.0) 914 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:36:24.974 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.landug to start
2026-04-15T13:36:25.184 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:36:25.184 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (5s) 0s ago 55s 92.2M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:36:25.184 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (57s) 0s ago 57s 99.4M - 20.2.0-19-g7ec4401a095 b4cb326006c0 6a48b9c9f47e
2026-04-15T13:36:25.185 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (58s) 0s ago 58s 99.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:36:25.185 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (56s) 0s ago 56s 99.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:36:25.483 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK
2026-04-15T13:36:25.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:25 vm06 bash[28114]: cluster 2026-04-15T13:36:24.186021+0000 mgr.vm06.qbbldl (mgr.14229) 197 : cluster [DBG] pgmap v91: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 59 KiB/s rd, 0 B/s wr, 96 op/s
2026-04-15T13:36:25.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:25 vm06 bash[28114]: audit 2026-04-15T13:36:24.473055+0000 mon.vm06 (mon.0) 915 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:25.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:25 vm06 bash[28114]: audit 2026-04-15T13:36:24.478328+0000 mon.vm06 (mon.0) 916 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:25.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:25 vm06 bash[28114]: audit 2026-04-15T13:36:25.080414+0000 mon.vm06 (mon.0) 917 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:25.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:25 vm06 bash[28114]: audit 2026-04-15T13:36:25.090637+0000 mon.vm06 (mon.0) 918 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:25.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:25 vm09 bash[34466]: cluster 2026-04-15T13:36:24.186021+0000 mgr.vm06.qbbldl (mgr.14229) 197 : cluster [DBG] pgmap v91: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 59 KiB/s rd, 0 B/s wr, 96 op/s
2026-04-15T13:36:25.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:25 vm09 bash[34466]: audit 2026-04-15T13:36:24.473055+0000 mon.vm06 (mon.0) 915 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:25.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:25 vm09 bash[34466]: audit 2026-04-15T13:36:24.478328+0000 mon.vm06 (mon.0) 916 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:25.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:25 vm09 bash[34466]: audit 2026-04-15T13:36:25.080414+0000 mon.vm06 (mon.0) 917 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:25.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:25 vm09 bash[34466]: audit 2026-04-15T13:36:25.090637+0000 mon.vm06 (mon.0) 918 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:26.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:26 vm06 bash[28114]: audit 2026-04-15T13:36:24.943765+0000 mgr.vm06.qbbldl (mgr.14229) 198 : audit [DBG] from='client.14856 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:26.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:26 vm06 bash[28114]: audit 2026-04-15T13:36:25.181891+0000 mgr.vm06.qbbldl (mgr.14229) 199 : audit [DBG] from='client.14860 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:26.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:26 vm06 bash[28114]: audit 2026-04-15T13:36:25.474447+0000 mon.vm06 (mon.0) 919 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:36:26.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:26 vm06 bash[28114]: audit 2026-04-15T13:36:25.475394+0000 mon.vm06 (mon.0) 920 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:36:26.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:26 vm06 bash[28114]: audit 2026-04-15T13:36:25.481448+0000 mon.vm06 (mon.0) 921 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:26.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:26 vm06 bash[28114]: audit 2026-04-15T13:36:25.483307+0000 mon.vm06 (mon.0) 922 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:36:26.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:26 vm06 bash[28114]: audit 2026-04-15T13:36:25.483690+0000 mon.vm06 (mon.0) 923 : audit [DBG] from='client.? 192.168.123.106:0/3351280690' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:36:26.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:26 vm09 bash[34466]: audit 2026-04-15T13:36:24.943765+0000 mgr.vm06.qbbldl (mgr.14229) 198 : audit [DBG] from='client.14856 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:26.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:26 vm09 bash[34466]: audit 2026-04-15T13:36:25.181891+0000 mgr.vm06.qbbldl (mgr.14229) 199 : audit [DBG] from='client.14860 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:26.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:26 vm09 bash[34466]: audit 2026-04-15T13:36:25.474447+0000 mon.vm06 (mon.0) 919 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:36:26.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:26 vm09 bash[34466]: audit 2026-04-15T13:36:25.475394+0000 mon.vm06 (mon.0) 920 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:36:26.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:26 vm09 bash[34466]: audit 2026-04-15T13:36:25.481448+0000 mon.vm06 (mon.0) 921 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:26.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:26 vm09 bash[34466]: audit 2026-04-15T13:36:25.483307+0000 mon.vm06 (mon.0) 922 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:36:26.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:26 vm09 bash[34466]: audit 2026-04-15T13:36:25.483690+0000 mon.vm06 (mon.0) 923 : audit [DBG] from='client.? 192.168.123.106:0/3351280690' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:36:27.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:27 vm09 bash[34466]: cluster 2026-04-15T13:36:26.186597+0000 mgr.vm06.qbbldl (mgr.14229) 200 : cluster [DBG] pgmap v92: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s
2026-04-15T13:36:27.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:27 vm06 bash[28114]: cluster 2026-04-15T13:36:26.186597+0000 mgr.vm06.qbbldl (mgr.14229) 200 : cluster [DBG] pgmap v92: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s
2026-04-15T13:36:29.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:29 vm09 bash[34466]: cluster 2026-04-15T13:36:28.187008+0000 mgr.vm06.qbbldl (mgr.14229) 201 : cluster [DBG] pgmap v93: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s
2026-04-15T13:36:29.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:29 vm06 bash[28114]: cluster 2026-04-15T13:36:28.187008+0000 mgr.vm06.qbbldl (mgr.14229) 201 : cluster [DBG] pgmap v93: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s
2026-04-15T13:36:30.696 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (11s) 5s ago 61s 92.2M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:36:30.894 INFO:teuthology.orchestra.run.vm06.stdout:Scheduled to stop rgw.foo.vm06.liyzhd on host 'vm06'
2026-04-15T13:36:31.147 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:36:31.349 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:36:31.349 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (11s) 6s ago 62s 92.2M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:36:31.349 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (63s) 6s ago 64s 99.4M - 20.2.0-19-g7ec4401a095 b4cb326006c0 6a48b9c9f47e
2026-04-15T13:36:31.349 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (64s) 6s ago 65s 99.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:36:31.349 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (62s) 6s ago 63s 99.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:36:31.587 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK
2026-04-15T13:36:31.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:31 vm09 bash[34466]: cluster 2026-04-15T13:36:30.187418+0000 mgr.vm06.qbbldl (mgr.14229) 202 : cluster [DBG] pgmap v94: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s
2026-04-15T13:36:31.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:31 vm09 bash[34466]: audit 2026-04-15T13:36:30.888745+0000 mon.vm06 (mon.0) 924 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:31.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:31 vm09 bash[34466]: audit 2026-04-15T13:36:30.894472+0000 mon.vm06 (mon.0) 925 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:31.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:31 vm09 bash[34466]: audit 2026-04-15T13:36:30.896016+0000 mon.vm06 (mon.0) 926 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:36:31.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:31 vm06 bash[28114]: cluster 2026-04-15T13:36:30.187418+0000 mgr.vm06.qbbldl (mgr.14229) 202 : cluster [DBG] pgmap v94: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 66 KiB/s rd, 0 B/s wr, 108 op/s
2026-04-15T13:36:31.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:31 vm06 bash[28114]: audit 2026-04-15T13:36:30.888745+0000 mon.vm06 (mon.0) 924 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:31.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:31 vm06 bash[28114]: audit 2026-04-15T13:36:30.894472+0000 mon.vm06 (mon.0) 925 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:31.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:31 vm06 bash[28114]: audit 2026-04-15T13:36:30.896016+0000 mon.vm06 (mon.0) 926 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:36:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:32 vm09 bash[34466]: audit 2026-04-15T13:36:30.680579+0000 mgr.vm06.qbbldl (mgr.14229) 203 : audit [DBG] from='client.14868 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:32 vm09 bash[34466]: audit 2026-04-15T13:36:30.880520+0000 mgr.vm06.qbbldl (mgr.14229) 204 : audit [DBG] from='client.14872 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm06.liyzhd", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:32 vm09 bash[34466]: cephadm 2026-04-15T13:36:30.881261+0000 mgr.vm06.qbbldl (mgr.14229) 205 : cephadm [INF] Schedule stop daemon rgw.foo.vm06.liyzhd
2026-04-15T13:36:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:32 vm09 bash[34466]: audit 2026-04-15T13:36:31.129658+0000 mgr.vm06.qbbldl (mgr.14229) 206 : audit [DBG] from='client.14876 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:32.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:32 vm09 bash[34466]: audit 2026-04-15T13:36:31.345712+0000 mgr.vm06.qbbldl (mgr.14229) 207 : audit [DBG] from='client.14880 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:32.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:32 vm09 bash[34466]: audit 2026-04-15T13:36:31.588193+0000 mon.vm06 (mon.0) 927 : audit [DBG] from='client.? 192.168.123.106:0/3975073809' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:36:32.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:32 vm06 bash[28114]: audit 2026-04-15T13:36:30.680579+0000 mgr.vm06.qbbldl (mgr.14229) 203 : audit [DBG] from='client.14868 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:32.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:32 vm06 bash[28114]: audit 2026-04-15T13:36:30.880520+0000 mgr.vm06.qbbldl (mgr.14229) 204 : audit [DBG] from='client.14872 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm06.liyzhd", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:32.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:32 vm06 bash[28114]: cephadm 2026-04-15T13:36:30.881261+0000 mgr.vm06.qbbldl (mgr.14229) 205 : cephadm [INF] Schedule stop daemon rgw.foo.vm06.liyzhd
2026-04-15T13:36:32.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:32 vm06 bash[28114]: audit 2026-04-15T13:36:31.129658+0000 mgr.vm06.qbbldl (mgr.14229) 206 : audit [DBG] from='client.14876 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target":
["mon-mgr", ""]}]: dispatch 2026-04-15T13:36:32.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:32 vm06 bash[28114]: audit 2026-04-15T13:36:31.345712+0000 mgr.vm06.qbbldl (mgr.14229) 207 : audit [DBG] from='client.14880 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:36:32.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:32 vm06 bash[28114]: audit 2026-04-15T13:36:31.345712+0000 mgr.vm06.qbbldl (mgr.14229) 207 : audit [DBG] from='client.14880 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:36:32.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:32 vm06 bash[28114]: audit 2026-04-15T13:36:31.588193+0000 mon.vm06 (mon.0) 927 : audit [DBG] from='client.? 192.168.123.106:0/3975073809' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:36:32.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:32 vm06 bash[28114]: audit 2026-04-15T13:36:31.588193+0000 mon.vm06 (mon.0) 927 : audit [DBG] from='client.? 192.168.123.106:0/3975073809' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:36:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:33 vm09 bash[34466]: cluster 2026-04-15T13:36:32.187839+0000 mgr.vm06.qbbldl (mgr.14229) 208 : cluster [DBG] pgmap v95: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 49 KiB/s rd, 0 B/s wr, 81 op/s 2026-04-15T13:36:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:33 vm09 bash[34466]: cluster 2026-04-15T13:36:32.187839+0000 mgr.vm06.qbbldl (mgr.14229) 208 : cluster [DBG] pgmap v95: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 49 KiB/s rd, 0 B/s wr, 81 op/s 2026-04-15T13:36:33.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:33 vm06 bash[28114]: cluster 2026-04-15T13:36:32.187839+0000 mgr.vm06.qbbldl (mgr.14229) 208 : cluster [DBG] pgmap v95: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 49 KiB/s rd, 0 B/s wr, 81 op/s 2026-04-15T13:36:33.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:33 vm06 bash[28114]: cluster 2026-04-15T13:36:32.187839+0000 mgr.vm06.qbbldl (mgr.14229) 208 : cluster [DBG] pgmap v95: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 49 KiB/s rd, 0 B/s wr, 81 op/s 2026-04-15T13:36:35.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:35 vm09 bash[34466]: cluster 2026-04-15T13:36:34.188302+0000 mgr.vm06.qbbldl (mgr.14229) 209 : cluster [DBG] pgmap v96: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 39 KiB/s rd, 0 B/s wr, 63 op/s 2026-04-15T13:36:35.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:35 vm09 bash[34466]: cluster 2026-04-15T13:36:34.188302+0000 mgr.vm06.qbbldl (mgr.14229) 209 : cluster [DBG] pgmap v96: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 39 KiB/s rd, 0 B/s wr, 63 op/s 2026-04-15T13:36:35.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:35 vm06 bash[28114]: cluster 2026-04-15T13:36:34.188302+0000 mgr.vm06.qbbldl (mgr.14229) 209 : cluster [DBG] pgmap v96: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 39 KiB/s rd, 0 B/s wr, 63 op/s 2026-04-15T13:36:35.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:35 vm06 bash[28114]: cluster 
2026-04-15T13:36:34.188302+0000 mgr.vm06.qbbldl (mgr.14229) 209 : cluster [DBG] pgmap v96: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 39 KiB/s rd, 0 B/s wr, 63 op/s 2026-04-15T13:36:36.833 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop 2026-04-15T13:36:37.070 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:36:37.070 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (17s) 0s ago 67s 94.1M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:36:37.070 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (69s) 0s ago 69s 100M - 20.2.0-19-g7ec4401a095 b4cb326006c0 6a48b9c9f47e 2026-04-15T13:36:37.070 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (70s) 1s ago 70s 100M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3 2026-04-15T13:36:37.070 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (68s) 1s ago 68s 100M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:36:37.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:36 vm09 bash[34466]: audit 2026-04-15T13:36:35.810891+0000 mon.vm06 (mon.0) 928 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:36:37.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:36 vm09 bash[34466]: audit 2026-04-15T13:36:35.810891+0000 mon.vm06 (mon.0) 928 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:36:37.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:36 vm09 bash[34466]: audit 2026-04-15T13:36:35.817690+0000 mon.vm06 (mon.0) 929 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:36:37.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:36 vm09 bash[34466]: audit 2026-04-15T13:36:35.817690+0000 mon.vm06 (mon.0) 929 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:36:37.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:36 vm09 bash[34466]: cluster 2026-04-15T13:36:36.188847+0000 mgr.vm06.qbbldl (mgr.14229) 210 : cluster [DBG] pgmap v97: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 7.2 KiB/s rd, 0 B/s wr, 12 op/s 2026-04-15T13:36:37.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:36 vm09 bash[34466]: cluster 2026-04-15T13:36:36.188847+0000 mgr.vm06.qbbldl (mgr.14229) 210 : cluster [DBG] pgmap v97: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 7.2 KiB/s rd, 0 B/s wr, 12 op/s 2026-04-15T13:36:37.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:36 vm09 bash[34466]: audit 2026-04-15T13:36:36.429717+0000 mon.vm06 (mon.0) 930 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:36:37.110 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:36 vm09 bash[34466]: audit 2026-04-15T13:36:36.429717+0000 mon.vm06 (mon.0) 930 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:36:37.110 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:36 vm09 bash[34466]: audit 2026-04-15T13:36:36.436428+0000 mon.vm06 (mon.0) 931 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:36:37.110 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:36 vm09 bash[34466]: audit 2026-04-15T13:36:36.436428+0000 mon.vm06 (mon.0) 931 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:36:37.110 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:36 vm09 bash[34466]: audit 2026-04-15T13:36:36.437985+0000 mon.vm06 (mon.0) 932 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:36:37.110 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:36 vm09 bash[34466]: audit 2026-04-15T13:36:36.437985+0000 mon.vm06 (mon.0) 932 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:36:37.110 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:36 vm09 bash[34466]: audit 2026-04-15T13:36:36.438746+0000 mon.vm06 (mon.0) 933 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:36:37.110 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:36 vm09 bash[34466]: audit 2026-04-15T13:36:36.438746+0000 mon.vm06 (mon.0) 933 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:36:37.110 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:36 vm09 bash[34466]: audit 2026-04-15T13:36:36.443700+0000 mon.vm06 (mon.0) 934 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:36:37.110 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:36 vm09 bash[34466]: audit 2026-04-15T13:36:36.443700+0000 mon.vm06 (mon.0) 934 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:36:37.110 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:36 vm09 bash[34466]: audit 2026-04-15T13:36:36.445686+0000 mon.vm06 (mon.0) 935 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:36:37.110 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:36 vm09 bash[34466]: audit 2026-04-15T13:36:36.445686+0000 mon.vm06 (mon.0) 935 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:36:37.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:36 vm06 bash[28114]: audit 2026-04-15T13:36:35.810891+0000 mon.vm06 (mon.0) 928 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:36:37.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:36 vm06 bash[28114]: audit 2026-04-15T13:36:35.810891+0000 mon.vm06 (mon.0) 928 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:36:37.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:36 vm06 bash[28114]: audit 2026-04-15T13:36:35.817690+0000 mon.vm06 (mon.0) 929 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:36:37.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:36 vm06 bash[28114]: audit 2026-04-15T13:36:35.817690+0000 mon.vm06 (mon.0) 929 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 
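
The audit entries above trace the first wait cycle: client.14872 dispatches the orch daemon stop for rgw.foo.vm06.liyzhd at 13:36:30, cephadm logs "Schedule stop daemon", and the subsequent orch ps / health detail dispatches (client.14876, client.14880, ...) come from a shell loop polling until the daemon stops. A minimal Python sketch of the same kind of poll, using `ceph orch ps --format json` instead of grepping the table output, might look like the following; this is an illustration, not the suite's script, and the "status_desc" field name is an assumption based on recent cephadm JSON output that should be verified on your release.

    #!/usr/bin/env python3
    # Hypothetical sketch: poll the orchestrator until a daemon reports the
    # desired state. Assumes `ceph orch ps --format json` returns a list of
    # daemon dicts with "daemon_type", "daemon_id" and "status_desc" keys
    # (values like "running", "stopped", "error").
    import json
    import subprocess
    import sys
    import time

    def daemon_state(name):
        out = subprocess.check_output(
            ["ceph", "orch", "ps", "--daemon-type", "rgw", "--format", "json"])
        for d in json.loads(out):
            if "%s.%s" % (d["daemon_type"], d["daemon_id"]) == name:
                return d.get("status_desc")
        return None

    def wait_for_state(name, want, timeout=300):
        # Mirrors the 300 s budget and ~5 s poll cadence visible in this log.
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            state = daemon_state(name)
            if state == want:
                return
            print("Waiting for %s to reach %s (currently: %s)" % (name, want, state))
            time.sleep(5)
        sys.exit("timed out waiting for %s to reach %s" % (name, want))

    if __name__ == "__main__":
        wait_for_state("rgw.foo.vm06.liyzhd", "stopped")
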
2026-04-15T13:36:37.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:36 vm06 bash[28114]: cluster 2026-04-15T13:36:36.188847+0000 mgr.vm06.qbbldl (mgr.14229) 210 : cluster [DBG] pgmap v97: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 7.2 KiB/s rd, 0 B/s wr, 12 op/s
2026-04-15T13:36:37.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:36 vm06 bash[28114]: audit 2026-04-15T13:36:36.429717+0000 mon.vm06 (mon.0) 930 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:37.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:36 vm06 bash[28114]: audit 2026-04-15T13:36:36.436428+0000 mon.vm06 (mon.0) 931 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:37.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:36 vm06 bash[28114]: audit 2026-04-15T13:36:36.437985+0000 mon.vm06 (mon.0) 932 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:36:37.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:36 vm06 bash[28114]: audit 2026-04-15T13:36:36.438746+0000 mon.vm06 (mon.0) 933 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:36:37.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:36 vm06 bash[28114]: audit 2026-04-15T13:36:36.443700+0000 mon.vm06 (mon.0) 934 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:37.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:36 vm06 bash[28114]: audit 2026-04-15T13:36:36.445686+0000 mon.vm06 (mon.0) 935 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:36:37.312 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK
2026-04-15T13:36:38.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:37 vm09 bash[34466]: audit 2026-04-15T13:36:36.798212+0000 mgr.vm06.qbbldl (mgr.14229) 211 : audit [DBG] from='client.14888 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:38.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:37 vm09 bash[34466]: audit 2026-04-15T13:36:37.066418+0000 mgr.vm06.qbbldl (mgr.14229) 212 : audit [DBG] from='client.14892 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:38.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:37 vm09 bash[34466]: audit 2026-04-15T13:36:37.313354+0000 mon.vm06 (mon.0) 936 : audit [DBG] from='client.? 192.168.123.106:0/382582481' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:36:38.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:37 vm06 bash[28114]: audit 2026-04-15T13:36:36.798212+0000 mgr.vm06.qbbldl (mgr.14229) 211 : audit [DBG] from='client.14888 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:38.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:37 vm06 bash[28114]: audit 2026-04-15T13:36:37.066418+0000 mgr.vm06.qbbldl (mgr.14229) 212 : audit [DBG] from='client.14892 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:38.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:37 vm06 bash[28114]: audit 2026-04-15T13:36:37.313354+0000 mon.vm06 (mon.0) 936 : audit [DBG] from='client.? 192.168.123.106:0/382582481' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:36:39.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:38 vm09 bash[34466]: cluster 2026-04-15T13:36:38.189333+0000 mgr.vm06.qbbldl (mgr.14229) 213 : cluster [DBG] pgmap v98: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:36:39.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:38 vm09 bash[34466]: audit 2026-04-15T13:36:38.482391+0000 mon.vm06 (mon.0) 937 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:36:39.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:38 vm06 bash[28114]: cluster 2026-04-15T13:36:38.189333+0000 mgr.vm06.qbbldl (mgr.14229) 213 : cluster [DBG] pgmap v98: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:36:39.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:38 vm06 bash[28114]: audit 2026-04-15T13:36:38.482391+0000 mon.vm06 (mon.0) 937 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:36:41.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:41 vm06 bash[28114]: cluster 2026-04-15T13:36:40.189745+0000 mgr.vm06.qbbldl (mgr.14229) 214 : cluster [DBG] pgmap v99: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s
2026-04-15T13:36:41.567 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:41 vm09 bash[34466]: cluster 2026-04-15T13:36:40.189745+0000 mgr.vm06.qbbldl (mgr.14229) 214 : cluster [DBG] pgmap v99: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s
2026-04-15T13:36:42.546 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:36:42.738 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:36:42.738 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (23s) 6s ago 73s 94.1M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:36:42.738 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (75s) 6s ago 75s 100M - 20.2.0-19-g7ec4401a095 b4cb326006c0 6a48b9c9f47e
2026-04-15T13:36:42.738 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (76s) 6s ago 76s 100M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:36:42.738 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (74s) 6s ago 74s 100M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:36:42.986 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK
2026-04-15T13:36:43.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:43 vm06 bash[28114]: cluster 2026-04-15T13:36:42.190100+0000 mgr.vm06.qbbldl (mgr.14229) 215 : cluster [DBG] pgmap v100: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s
2026-04-15T13:36:43.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:43 vm06 bash[28114]: audit 2026-04-15T13:36:42.987170+0000 mon.vm06 (mon.0) 938 : audit [DBG] from='client.? 192.168.123.106:0/1401284562' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:36:43.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:43 vm09 bash[34466]: cluster 2026-04-15T13:36:42.190100+0000 mgr.vm06.qbbldl (mgr.14229) 215 : cluster [DBG] pgmap v100: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s
2026-04-15T13:36:43.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:43 vm09 bash[34466]: audit 2026-04-15T13:36:42.987170+0000 mon.vm06 (mon.0) 938 : audit [DBG] from='client.? 192.168.123.106:0/1401284562' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:36:44.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:44 vm06 bash[28114]: audit 2026-04-15T13:36:42.527341+0000 mgr.vm06.qbbldl (mgr.14229) 216 : audit [DBG] from='client.14900 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:44.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:44 vm06 bash[28114]: audit 2026-04-15T13:36:42.735189+0000 mgr.vm06.qbbldl (mgr.14229) 217 : audit [DBG] from='client.14904 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:44.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:44 vm09 bash[34466]: audit 2026-04-15T13:36:42.527341+0000 mgr.vm06.qbbldl (mgr.14229) 216 : audit [DBG] from='client.14900 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:44.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:44 vm09 bash[34466]: audit 2026-04-15T13:36:42.735189+0000 mgr.vm06.qbbldl (mgr.14229) 217 : audit [DBG] from='client.14904 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:45.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:45 vm09 bash[34466]: cluster 2026-04-15T13:36:44.190487+0000 mgr.vm06.qbbldl (mgr.14229) 218 : cluster [DBG] pgmap v101: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s
2026-04-15T13:36:45.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:45 vm06 bash[28114]: cluster 2026-04-15T13:36:44.190487+0000 mgr.vm06.qbbldl (mgr.14229) 218 : cluster [DBG] pgmap v101: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s
2026-04-15T13:36:47.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:47 vm09 bash[34466]: cluster 2026-04-15T13:36:46.190972+0000 mgr.vm06.qbbldl (mgr.14229) 219 : cluster [DBG] pgmap v102: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s
2026-04-15T13:36:47.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:47 vm09 bash[34466]: audit 2026-04-15T13:36:46.961930+0000 mon.vm06 (mon.0) 939 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:47.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:47 vm09 bash[34466]: audit 2026-04-15T13:36:46.967999+0000 mon.vm06 (mon.0) 940 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:47.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:47 vm09 bash[34466]: audit 2026-04-15T13:36:46.970733+0000 mon.vm06 (mon.0) 941 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:36:47.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:47 vm06 bash[28114]: cluster 2026-04-15T13:36:46.190972+0000 mgr.vm06.qbbldl (mgr.14229) 219 : cluster [DBG] pgmap v102: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s
2026-04-15T13:36:47.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:47 vm06 bash[28114]: audit 2026-04-15T13:36:46.961930+0000 mon.vm06 (mon.0) 939 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
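
Between poll iterations the log is dominated by the mgr's own periodic housekeeping against the mon (config dump, config generate-minimal-conf, auth get, osd tree, osd blocklist ls; all from='mgr.14229 ...'), relayed once by each mon's journal. The "Waiting for rgw.foo.vm06.liyzhd to stop" lines themselves land at 13:36:36.833, 13:36:42.546, 13:36:48.223 and 13:36:53.932, i.e. roughly every 5.7 s: a 5 s sleep plus the cost of the per-iteration orch ps and health detail calls. A small stdlib-only sketch for extracting that cadence from a log shaped like this one (assuming one teuthology record per line):

    #!/usr/bin/env python3
    # Illustrative log analysis: print the interval between consecutive
    # "Waiting for <daemon> to stop" records read from stdin.
    import re
    import sys
    from datetime import datetime

    PAT = re.compile(
        r"^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+) .*Waiting for (\S+) to stop")

    prev = None
    for line in sys.stdin:
        m = PAT.match(line)
        if not m:
            continue
        ts = datetime.fromisoformat(m.group(1))
        if prev is not None:
            # e.g. 13:36:42.546 - 13:36:36.833 -> "rgw.foo.vm06.liyzhd +5.7s"
            print("%s +%.1fs" % (m.group(2), (ts - prev).total_seconds()))
        prev = ts
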
2026-04-15T13:36:47.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:47 vm06 bash[28114]: audit 2026-04-15T13:36:46.967999+0000 mon.vm06 (mon.0) 940 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:47.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:47 vm06 bash[28114]: audit 2026-04-15T13:36:46.970733+0000 mon.vm06 (mon.0) 941 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:36:48.223 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:36:48.434 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:36:48.435 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (28s) 12s ago 79s 94.1M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:36:48.435 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (81s) 12s ago 81s 100M - 20.2.0-19-g7ec4401a095 b4cb326006c0 6a48b9c9f47e
2026-04-15T13:36:48.435 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (82s) 12s ago 82s 100M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:36:48.435 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (80s) 12s ago 80s 100M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:36:48.683 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK
2026-04-15T13:36:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:49 vm09 bash[34466]: cluster 2026-04-15T13:36:48.191384+0000 mgr.vm06.qbbldl (mgr.14229) 220 : cluster [DBG] pgmap v103: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s
2026-04-15T13:36:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:49 vm09 bash[34466]: audit 2026-04-15T13:36:48.204151+0000 mgr.vm06.qbbldl (mgr.14229) 221 : audit [DBG] from='client.14912 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:49 vm09 bash[34466]: audit 2026-04-15T13:36:48.684517+0000 mon.vm06 (mon.0) 942 : audit [DBG] from='client.? 192.168.123.106:0/2197804961' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:36:49.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:49 vm06 bash[28114]: cluster 2026-04-15T13:36:48.191384+0000 mgr.vm06.qbbldl (mgr.14229) 220 : cluster [DBG] pgmap v103: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s
2026-04-15T13:36:49.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:49 vm06 bash[28114]: audit 2026-04-15T13:36:48.204151+0000 mgr.vm06.qbbldl (mgr.14229) 221 : audit [DBG] from='client.14912 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:49.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:49 vm06 bash[28114]: audit 2026-04-15T13:36:48.684517+0000 mon.vm06 (mon.0) 942 : audit [DBG] from='client.? 192.168.123.106:0/2197804961' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:36:50.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:50 vm09 bash[34466]: audit 2026-04-15T13:36:48.432110+0000 mgr.vm06.qbbldl (mgr.14229) 222 : audit [DBG] from='client.14916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:50.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:50 vm06 bash[28114]: audit 2026-04-15T13:36:48.432110+0000 mgr.vm06.qbbldl (mgr.14229) 222 : audit [DBG] from='client.14916 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:51.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:51 vm09 bash[34466]: cluster 2026-04-15T13:36:50.191854+0000 mgr.vm06.qbbldl (mgr.14229) 223 : cluster [DBG] pgmap v104: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s
2026-04-15T13:36:51.762 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:51 vm06 bash[28114]: cluster 2026-04-15T13:36:50.191854+0000 mgr.vm06.qbbldl (mgr.14229) 223 : cluster [DBG] pgmap v104: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 341 B/s rd, 682 B/s wr, 0 op/s
2026-04-15T13:36:53.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:52 vm06 bash[28114]: audit 2026-04-15T13:36:51.897628+0000 mon.vm06 (mon.0) 943 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:53.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:52 vm06 bash[28114]: audit 2026-04-15T13:36:51.902555+0000 mon.vm06 (mon.0) 944 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:53.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:52 vm06 bash[28114]: cluster 2026-04-15T13:36:52.192263+0000 mgr.vm06.qbbldl (mgr.14229) 224 : cluster [DBG] pgmap v105: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:36:53.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:52 vm06 bash[28114]: audit 2026-04-15T13:36:52.442898+0000 mon.vm06 (mon.0) 945 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:53.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:52 vm06 bash[28114]: audit 2026-04-15T13:36:52.448668+0000 mon.vm06 (mon.0) 946 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:53.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:52 vm06 bash[28114]: audit 2026-04-15T13:36:52.449812+0000 mon.vm06 (mon.0) 947 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:36:53.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:52 vm06 bash[28114]: audit 2026-04-15T13:36:52.450380+0000 mon.vm06 (mon.0) 948 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:36:53.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:52 vm06 bash[28114]: audit 2026-04-15T13:36:52.453815+0000 mon.vm06 (mon.0) 949 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:53.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:52 vm06 bash[28114]: audit 2026-04-15T13:36:52.455124+0000 mon.vm06 (mon.0) 950 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:36:53.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:52 vm09 bash[34466]: audit 2026-04-15T13:36:51.897628+0000 mon.vm06 (mon.0) 943 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:53.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:52 vm09 bash[34466]: audit 2026-04-15T13:36:51.902555+0000 mon.vm06 (mon.0) 944 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:53.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:52 vm09 bash[34466]: cluster 2026-04-15T13:36:52.192263+0000 mgr.vm06.qbbldl (mgr.14229) 224 : cluster [DBG] pgmap v105: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:36:53.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:52 vm09 bash[34466]: audit 2026-04-15T13:36:52.442898+0000 mon.vm06 (mon.0) 945 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:53.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:52 vm09 bash[34466]: audit 2026-04-15T13:36:52.448668+0000 mon.vm06 (mon.0) 946 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:53.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:52 vm09 bash[34466]: audit 2026-04-15T13:36:52.449812+0000 mon.vm06 (mon.0) 947 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:36:53.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:52 vm09 bash[34466]: audit 2026-04-15T13:36:52.450380+0000 mon.vm06 (mon.0) 948 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:36:53.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:52 vm09 bash[34466]: audit 2026-04-15T13:36:52.453815+0000 mon.vm06 (mon.0) 949 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:53.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:52 vm09 bash[34466]: audit 2026-04-15T13:36:52.455124+0000 mon.vm06 (mon.0) 950 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:36:53.932 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:36:54.143 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:36:54.143 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (34s) 1s ago 84s 95.4M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:36:54.143 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 1s ago 86s - -
2026-04-15T13:36:54.143 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (87s) 2s ago 87s 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:36:54.143 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (85s) 2s ago 85s 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:36:54.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:53 vm06 bash[28114]: cluster 2026-04-15T13:36:52.451458+0000 mgr.vm06.qbbldl (mgr.14229) 225 : cluster [DBG] pgmap v106: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:36:54.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:53 vm06 bash[28114]: cluster 2026-04-15T13:36:52.901901+0000 mon.vm06 (mon.0) 951 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)
2026-04-15T13:36:54.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:53 vm06 bash[28114]: audit 2026-04-15T13:36:53.488055+0000 mon.vm06 (mon.0) 952 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:54.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:53 vm06 bash[28114]: audit 2026-04-15T13:36:53.489170+0000 mon.vm06 (mon.0) 953 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:36:54.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:53 vm09 bash[34466]: cluster 2026-04-15T13:36:52.451458+0000 mgr.vm06.qbbldl (mgr.14229) 225 : cluster [DBG] pgmap v106: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:36:54.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:53 vm09 bash[34466]: cluster 2026-04-15T13:36:52.901901+0000 mon.vm06 (mon.0) 951 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)
2026-04-15T13:36:54.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:53 vm09 bash[34466]: audit 2026-04-15T13:36:53.488055+0000 mon.vm06 (mon.0) 952 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:36:54.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:53 vm09 bash[34466]: audit 2026-04-15T13:36:53.489170+0000 mon.vm06 (mon.0) 953 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:36:54.401 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:36:54.401 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:36:54.401 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:36:55.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:54 vm06 bash[28114]: audit 2026-04-15T13:36:53.910282+0000 mgr.vm06.qbbldl (mgr.14229) 226 : audit [DBG] from='client.14924 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:55.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:54 vm06 bash[28114]: audit 2026-04-15T13:36:54.140206+0000 mgr.vm06.qbbldl (mgr.14229) 227 : audit [DBG] from='client.14928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:55.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:54 vm06 bash[28114]: audit 2026-04-15T13:36:54.401607+0000 mon.vm06 (mon.0) 954 : audit [DBG] from='client.? 192.168.123.106:0/1388691145' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:36:55.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:54 vm09 bash[34466]: audit 2026-04-15T13:36:53.910282+0000 mgr.vm06.qbbldl (mgr.14229) 226 : audit [DBG] from='client.14924 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:55.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:54 vm09 bash[34466]: audit 2026-04-15T13:36:54.140206+0000 mgr.vm06.qbbldl (mgr.14229) 227 : audit [DBG] from='client.14928 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:36:55.360 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:54 vm09 bash[34466]: audit 2026-04-15T13:36:54.401607+0000 mon.vm06 (mon.0) 954 : audit [DBG] from='client.?
2026-04-15T13:36:56.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:55 vm06 bash[28114]: cluster 2026-04-15T13:36:54.451971+0000 mgr.vm06.qbbldl (mgr.14229) 228 : cluster [DBG] pgmap v107: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 199 B/s wr, 0 op/s
2026-04-15T13:36:56.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:55 vm09 bash[34466]: cluster 2026-04-15T13:36:54.451971+0000 mgr.vm06.qbbldl (mgr.14229) 228 : cluster [DBG] pgmap v107: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 199 B/s wr, 0 op/s
2026-04-15T13:36:58.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:57 vm06 bash[28114]: cluster 2026-04-15T13:36:56.452604+0000 mgr.vm06.qbbldl (mgr.14229) 229 : cluster [DBG] pgmap v108: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 199 B/s wr, 0 op/s
2026-04-15T13:36:58.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:57 vm09 bash[34466]: cluster 2026-04-15T13:36:56.452604+0000 mgr.vm06.qbbldl (mgr.14229) 229 : cluster [DBG] pgmap v108: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 199 B/s wr, 0 op/s
2026-04-15T13:36:59.648 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:36:59.844 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:36:59.845 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (40s) 7s ago 90s 95.4M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:36:59.845 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 7s ago 92s - -
2026-04-15T13:36:59.845 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (93s) 7s ago 93s 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:36:59.845 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (91s) 7s ago 91s 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:37:00.094 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:37:00.095 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:37:00.095 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:37:00.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:36:59 vm06 bash[28114]: cluster 2026-04-15T13:36:58.453060+0000 mgr.vm06.qbbldl (mgr.14229) 230 : cluster [DBG] pgmap v109: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 199 B/s wr, 0 op/s
2026-04-15T13:37:00.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:36:59 vm09 bash[34466]: cluster 2026-04-15T13:36:58.453060+0000 mgr.vm06.qbbldl (mgr.14229) 230 : cluster [DBG] pgmap v109: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 199 B/s wr, 0 op/s
2026-04-15T13:37:01.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:00 vm06 bash[28114]: audit 2026-04-15T13:36:59.628101+0000 mgr.vm06.qbbldl (mgr.14229) 231 : audit [DBG] from='client.14936 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:01.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:00 vm06 bash[28114]: audit 2026-04-15T13:36:59.841688+0000 mgr.vm06.qbbldl (mgr.14229) 232 : audit [DBG] from='client.14940 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:01.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:00 vm06 bash[28114]: audit 2026-04-15T13:37:00.095023+0000 mon.vm06 (mon.0) 955 : audit [DBG] from='client.? 192.168.123.106:0/2793487826' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:37:01.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:00 vm09 bash[34466]: audit 2026-04-15T13:36:59.628101+0000 mgr.vm06.qbbldl (mgr.14229) 231 : audit [DBG] from='client.14936 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:01.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:00 vm09 bash[34466]: audit 2026-04-15T13:36:59.841688+0000 mgr.vm06.qbbldl (mgr.14229) 232 : audit [DBG] from='client.14940 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:01.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:00 vm09 bash[34466]: audit 2026-04-15T13:37:00.095023+0000 mon.vm06 (mon.0) 955 : audit [DBG] from='client.? 192.168.123.106:0/2793487826' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:37:02.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:01 vm06 bash[28114]: cluster 2026-04-15T13:37:00.453670+0000 mgr.vm06.qbbldl (mgr.14229) 233 : cluster [DBG] pgmap v110: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 199 B/s wr, 0 op/s
2026-04-15T13:37:02.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:01 vm09 bash[34466]: cluster 2026-04-15T13:37:00.453670+0000 mgr.vm06.qbbldl (mgr.14229) 233 : cluster [DBG] pgmap v110: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 199 B/s wr, 0 op/s
2026-04-15T13:37:04.262 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:03 vm06 bash[28114]: cluster 2026-04-15T13:37:02.454140+0000 mgr.vm06.qbbldl (mgr.14229) 234 : cluster [DBG] pgmap v111: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 199 B/s wr, 0 op/s
2026-04-15T13:37:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:03 vm09 bash[34466]: cluster 2026-04-15T13:37:02.454140+0000 mgr.vm06.qbbldl (mgr.14229) 234 : cluster [DBG] pgmap v111: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 99 B/s rd, 199 B/s wr, 0 op/s
2026-04-15T13:37:05.314 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:37:05.500 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:37:05.501 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (46s) 13s ago 96s 95.4M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:37:05.501 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 13s ago 98s - -
2026-04-15T13:37:05.501 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (99s) 13s ago 99s 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:37:05.501 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (97s) 13s ago 97s 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:37:05.757 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:37:05.757 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:37:05.758 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:37:06.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:06 vm09 bash[34466]: cluster 2026-04-15T13:37:04.454602+0000 mgr.vm06.qbbldl (mgr.14229) 235 : cluster [DBG] pgmap v112: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:37:06.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:06 vm09 bash[34466]: audit 2026-04-15T13:37:05.295967+0000 mgr.vm06.qbbldl (mgr.14229) 236 : audit [DBG] from='client.14948 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:06.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:06 vm09 bash[34466]: audit 2026-04-15T13:37:05.757935+0000 mon.vm06 (mon.0) 956 : audit [DBG] from='client.? 192.168.123.106:0/3253348884' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:37:06.512 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:06 vm06 bash[28114]: cluster 2026-04-15T13:37:04.454602+0000 mgr.vm06.qbbldl (mgr.14229) 235 : cluster [DBG] pgmap v112: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:37:06.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:06 vm06 bash[28114]: audit 2026-04-15T13:37:05.295967+0000 mgr.vm06.qbbldl (mgr.14229) 236 : audit [DBG] from='client.14948 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:06.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:06 vm06 bash[28114]: audit 2026-04-15T13:37:05.757935+0000 mon.vm06 (mon.0) 956 : audit [DBG] from='client.? 192.168.123.106:0/3253348884' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:37:07.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:07 vm09 bash[34466]: audit 2026-04-15T13:37:05.498217+0000 mgr.vm06.qbbldl (mgr.14229) 237 : audit [DBG] from='client.14952 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:07.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:07 vm06 bash[28114]: audit 2026-04-15T13:37:05.498217+0000 mgr.vm06.qbbldl (mgr.14229) 237 : audit [DBG] from='client.14952 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:08.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:08 vm09 bash[34466]: cluster 2026-04-15T13:37:06.454997+0000 mgr.vm06.qbbldl (mgr.14229) 238 : cluster [DBG] pgmap v113: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:37:08.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:08 vm06 bash[28114]: cluster 2026-04-15T13:37:06.454997+0000 mgr.vm06.qbbldl (mgr.14229) 238 : cluster [DBG] pgmap v113: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:37:09.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:09 vm06 bash[28114]: audit 2026-04-15T13:37:08.482817+0000 mon.vm06 (mon.0) 957 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:37:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:09 vm09 bash[34466]: audit 2026-04-15T13:37:08.482817+0000 mon.vm06 (mon.0) 957 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:37:10.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:10 vm09 bash[34466]: cluster 2026-04-15T13:37:08.455379+0000 mgr.vm06.qbbldl (mgr.14229) 239 : cluster [DBG] pgmap v114: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:37:10.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:10 vm06 bash[28114]: cluster 2026-04-15T13:37:08.455379+0000 mgr.vm06.qbbldl (mgr.14229) 239 : cluster [DBG] pgmap v114: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:37:10.972 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:37:11.169 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:37:11.169 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (51s) 18s ago 101s 95.4M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:37:11.169 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 18s ago 103s - -
2026-04-15T13:37:11.169 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (104s) 19s ago 104s 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:37:11.169 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (102s) 19s ago 102s 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:37:11.405 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:37:11.405 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:37:11.405 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:37:12.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:12 vm09 bash[34466]: cluster 2026-04-15T13:37:10.455877+0000 mgr.vm06.qbbldl (mgr.14229) 240 : cluster [DBG] pgmap v115: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:12.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:12 vm09 bash[34466]: audit 2026-04-15T13:37:10.955142+0000 mgr.vm06.qbbldl (mgr.14229) 241 : audit [DBG] from='client.14960 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:12.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:12 vm09 bash[34466]: audit 2026-04-15T13:37:11.166151+0000 mgr.vm06.qbbldl (mgr.14229) 242 : audit [DBG] from='client.14964 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:12.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:12 vm09 bash[34466]: audit 2026-04-15T13:37:11.405188+0000 mon.vm06 (mon.0) 958 : audit [DBG] from='client.? 192.168.123.106:0/3290094874' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:37:12.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:12 vm06 bash[28114]: cluster 2026-04-15T13:37:10.455877+0000 mgr.vm06.qbbldl (mgr.14229) 240 : cluster [DBG] pgmap v115: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:12.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:12 vm06 bash[28114]: audit 2026-04-15T13:37:10.955142+0000 mgr.vm06.qbbldl (mgr.14229) 241 : audit [DBG] from='client.14960 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:12.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:12 vm06 bash[28114]: audit 2026-04-15T13:37:11.166151+0000 mgr.vm06.qbbldl (mgr.14229) 242 : audit [DBG] from='client.14964 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:12.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:12 vm06 bash[28114]: audit 2026-04-15T13:37:11.405188+0000 mon.vm06 (mon.0) 958 : audit [DBG] from='client.? 192.168.123.106:0/3290094874' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:37:14.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:14 vm09 bash[34466]: cluster 2026-04-15T13:37:12.456259+0000 mgr.vm06.qbbldl (mgr.14229) 243 : cluster [DBG] pgmap v116: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:14.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:14 vm06 bash[28114]: cluster 2026-04-15T13:37:12.456259+0000 mgr.vm06.qbbldl (mgr.14229) 243 : cluster [DBG] pgmap v116: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:16.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:16 vm09 bash[34466]: cluster 2026-04-15T13:37:14.456778+0000 mgr.vm06.qbbldl (mgr.14229) 244 : cluster [DBG] pgmap v117: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:16.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:16 vm06 bash[28114]: cluster 2026-04-15T13:37:14.456778+0000 mgr.vm06.qbbldl (mgr.14229) 244 : cluster [DBG] pgmap v117: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:16.621 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:37:16.820 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:37:16.820 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (57s) 24s ago 107s 95.4M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:37:16.820 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 24s ago 109s - -
2026-04-15T13:37:16.820 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (110s) 24s ago 110s 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:37:16.820 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (108s) 24s ago 108s 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:37:17.056 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:37:17.056 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:37:17.056 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:37:17.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:17 vm09 bash[34466]: audit 2026-04-15T13:37:17.055874+0000 mon.vm06 (mon.0) 959 : audit [DBG] from='client.? 192.168.123.106:0/3040639098' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:37:17.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:17 vm06 bash[28114]: audit 2026-04-15T13:37:17.055874+0000 mon.vm06 (mon.0) 959 : audit [DBG] from='client.? 192.168.123.106:0/3040639098' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:37:18.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:18 vm06 bash[28114]: cluster 2026-04-15T13:37:16.457175+0000 mgr.vm06.qbbldl (mgr.14229) 245 : cluster [DBG] pgmap v118: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:18.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:18 vm06 bash[28114]: audit 2026-04-15T13:37:16.603886+0000 mgr.vm06.qbbldl (mgr.14229) 246 : audit [DBG] from='client.14972 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:18.263 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:18 vm06 bash[28114]: audit 2026-04-15T13:37:16.817106+0000 mgr.vm06.qbbldl (mgr.14229) 247 : audit [DBG] from='client.14976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:18.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:18 vm09 bash[34466]: cluster 2026-04-15T13:37:16.457175+0000 mgr.vm06.qbbldl (mgr.14229) 245 : cluster [DBG] pgmap v118: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:18.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:18 vm09 bash[34466]: audit 2026-04-15T13:37:16.603886+0000 mgr.vm06.qbbldl (mgr.14229) 246 : audit [DBG] from='client.14972 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:18.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:18 vm09 bash[34466]: audit 2026-04-15T13:37:16.817106+0000 mgr.vm06.qbbldl (mgr.14229) 247 : audit [DBG] from='client.14976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:20.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:20 vm06 bash[28114]: cluster 2026-04-15T13:37:18.457716+0000 mgr.vm06.qbbldl (mgr.14229) 248 : cluster [DBG] pgmap v119: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:20.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:20 vm09 bash[34466]: cluster 2026-04-15T13:37:18.457716+0000 mgr.vm06.qbbldl (mgr.14229) 248 : cluster [DBG] pgmap v119: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:22.277 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:37:22.475 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:37:22.475 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (63s) 30s ago 113s 95.4M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:37:22.475 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 30s ago 115s - -
2026-04-15T13:37:22.475 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (116s) 30s ago 116s 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:37:22.475 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (114s) 30s ago 114s 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:37:22.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:22 vm06 bash[28114]: cluster 2026-04-15T13:37:20.458165+0000 mgr.vm06.qbbldl (mgr.14229) 249 : cluster [DBG] pgmap v120: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:22.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:22 vm09 bash[34466]: cluster 2026-04-15T13:37:20.458165+0000 mgr.vm06.qbbldl (mgr.14229) 249 : cluster [DBG] pgmap v120: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:22.720 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:37:22.720 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:37:22.720 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:37:23.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:23 vm06 bash[28114]: audit 2026-04-15T13:37:22.261570+0000 mgr.vm06.qbbldl (mgr.14229) 250 : audit [DBG] from='client.14984 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:23.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:23 vm06 bash[28114]: audit 2026-04-15T13:37:22.719660+0000 mon.vm06 (mon.0) 960 : audit [DBG] from='client.? 192.168.123.106:0/2527566976' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:37:23.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:23 vm09 bash[34466]: audit 2026-04-15T13:37:22.261570+0000 mgr.vm06.qbbldl (mgr.14229) 250 : audit [DBG] from='client.14984 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:23.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:23 vm09 bash[34466]: audit 2026-04-15T13:37:22.719660+0000 mon.vm06 (mon.0) 960 : audit [DBG] from='client.? 192.168.123.106:0/2527566976' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:37:24.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:24 vm06 bash[28114]: cluster 2026-04-15T13:37:22.458483+0000 mgr.vm06.qbbldl (mgr.14229) 251 : cluster [DBG] pgmap v121: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:37:24.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:24 vm06 bash[28114]: audit 2026-04-15T13:37:22.469844+0000 mgr.vm06.qbbldl (mgr.14229) 252 : audit [DBG] from='client.14988 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:24.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:24 vm06 bash[28114]: audit 2026-04-15T13:37:23.483588+0000 mon.vm06 (mon.0) 961 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:37:24.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:24 vm09 bash[34466]: cluster 2026-04-15T13:37:22.458483+0000 mgr.vm06.qbbldl (mgr.14229) 251 : cluster [DBG] pgmap v121: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:37:24.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:24 vm09 bash[34466]: audit 2026-04-15T13:37:22.469844+0000 mgr.vm06.qbbldl (mgr.14229) 252 : audit [DBG] from='client.14988 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:24.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:24 vm09 bash[34466]: audit 2026-04-15T13:37:23.483588+0000 mon.vm06 (mon.0) 961 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:37:26.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:26 vm06 bash[28114]: cluster 2026-04-15T13:37:24.458927+0000 mgr.vm06.qbbldl (mgr.14229) 253 : cluster [DBG] pgmap v122: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:37:26.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:26 vm09 bash[34466]: cluster 2026-04-15T13:37:24.458927+0000 mgr.vm06.qbbldl (mgr.14229) 253 : cluster [DBG] pgmap v122: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:37:27.950 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:37:28.145 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:37:28.145 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (68s) 35s ago 118s 95.4M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:37:28.145 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 35s ago 2m - -
2026-04-15T13:37:28.145 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (2m) 36s ago 2m 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:37:28.145 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (119s) 36s ago 119s 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:37:28.404 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:37:28.404 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:37:28.404 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:37:28.513 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:28 vm06 bash[28114]: cluster 2026-04-15T13:37:26.459406+0000 mgr.vm06.qbbldl (mgr.14229) 254 : cluster [DBG] pgmap v123: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:37:28.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:28 vm09 bash[34466]: cluster 2026-04-15T13:37:26.459406+0000 mgr.vm06.qbbldl (mgr.14229) 254 : cluster [DBG] pgmap v123: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:37:28.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:28 vm09 bash[34466]: cluster 2026-04-15T13:37:26.459406+0000 mgr.vm06.qbbldl (mgr.14229) 254 : cluster [DBG] pgmap v123: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:37:29.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:29 vm06 bash[28114]: audit 2026-04-15T13:37:27.928753+0000 mgr.vm06.qbbldl (mgr.14229) 255 : audit [DBG] from='client.14996 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:37:29.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:29 vm06 bash[28114]: audit 2026-04-15T13:37:27.928753+0000 mgr.vm06.qbbldl (mgr.14229) 255 : audit [DBG] from='client.14996 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:37:29.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:29 vm06 bash[28114]: audit 2026-04-15T13:37:28.141425+0000 mgr.vm06.qbbldl (mgr.14229) 256 : audit [DBG] from='client.15000 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:37:29.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:29 vm06 bash[28114]: audit 2026-04-15T13:37:28.141425+0000 mgr.vm06.qbbldl (mgr.14229) 256 : audit [DBG] from='client.15000 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:37:29.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:29 vm06 bash[28114]: audit 2026-04-15T13:37:28.403294+0000 mon.vm06 (mon.0) 962 : audit [DBG] from='client.? 192.168.123.106:0/1422956742' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:37:29.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:29 vm06 bash[28114]: audit 2026-04-15T13:37:28.403294+0000 mon.vm06 (mon.0) 962 : audit [DBG] from='client.? 
192.168.123.106:0/1422956742' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:37:29.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:29 vm09 bash[34466]: audit 2026-04-15T13:37:27.928753+0000 mgr.vm06.qbbldl (mgr.14229) 255 : audit [DBG] from='client.14996 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:37:29.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:29 vm09 bash[34466]: audit 2026-04-15T13:37:27.928753+0000 mgr.vm06.qbbldl (mgr.14229) 255 : audit [DBG] from='client.14996 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:37:29.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:29 vm09 bash[34466]: audit 2026-04-15T13:37:28.141425+0000 mgr.vm06.qbbldl (mgr.14229) 256 : audit [DBG] from='client.15000 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:37:29.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:29 vm09 bash[34466]: audit 2026-04-15T13:37:28.141425+0000 mgr.vm06.qbbldl (mgr.14229) 256 : audit [DBG] from='client.15000 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:37:29.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:29 vm09 bash[34466]: audit 2026-04-15T13:37:28.403294+0000 mon.vm06 (mon.0) 962 : audit [DBG] from='client.? 192.168.123.106:0/1422956742' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:37:29.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:29 vm09 bash[34466]: audit 2026-04-15T13:37:28.403294+0000 mon.vm06 (mon.0) 962 : audit [DBG] from='client.? 
2026-04-15T13:37:30.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:30 vm06 bash[28114]: cluster 2026-04-15T13:37:28.459838+0000 mgr.vm06.qbbldl (mgr.14229) 257 : cluster [DBG] pgmap v124: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:37:30.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:30 vm09 bash[34466]: cluster 2026-04-15T13:37:28.459838+0000 mgr.vm06.qbbldl (mgr.14229) 257 : cluster [DBG] pgmap v124: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:37:31.763 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:31 vm06 bash[28114]: cluster 2026-04-15T13:37:30.460300+0000 mgr.vm06.qbbldl (mgr.14229) 258 : cluster [DBG] pgmap v125: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:37:31.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:31 vm09 bash[34466]: cluster 2026-04-15T13:37:30.460300+0000 mgr.vm06.qbbldl (mgr.14229) 258 : cluster [DBG] pgmap v125: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:37:33.619 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:37:33.806 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:37:33.806 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (74s) 41s ago 2m 95.4M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:37:33.806 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 41s ago 2m - -
2026-04-15T13:37:33.806 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (2m) 41s ago 2m 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:37:33.806 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (2m) 41s ago 2m 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:37:33.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:33 vm09 bash[34466]: cluster 2026-04-15T13:37:32.460722+0000 mgr.vm06.qbbldl (mgr.14229) 259 : cluster [DBG] pgmap v126: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:37:34.013 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:33 vm06 bash[28114]: cluster 2026-04-15T13:37:32.460722+0000 mgr.vm06.qbbldl (mgr.14229) 259 : cluster [DBG] pgmap v126: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:37:34.056 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:37:34.056 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:37:34.056 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:37:34.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:34 vm09 bash[34466]: audit 2026-04-15T13:37:33.601309+0000 mgr.vm06.qbbldl (mgr.14229) 260 : audit [DBG] from='client.15008 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:34.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:34 vm09 bash[34466]: audit 2026-04-15T13:37:33.802311+0000 mgr.vm06.qbbldl (mgr.14229) 261 : audit [DBG] from='client.15012 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:34.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:34 vm09 bash[34466]: audit 2026-04-15T13:37:34.055038+0000 mon.vm06 (mon.0) 963 : audit [DBG] from='client.? 192.168.123.106:0/1470682807' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:37:35.013 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:34 vm06 bash[28114]: audit 2026-04-15T13:37:33.601309+0000 mgr.vm06.qbbldl (mgr.14229) 260 : audit [DBG] from='client.15008 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:35.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:34 vm06 bash[28114]: audit 2026-04-15T13:37:33.802311+0000 mgr.vm06.qbbldl (mgr.14229) 261 : audit [DBG] from='client.15012 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:35.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:34 vm06 bash[28114]: audit 2026-04-15T13:37:34.055038+0000 mon.vm06 (mon.0) 963 : audit [DBG] from='client.? 192.168.123.106:0/1470682807' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:37:35.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:35 vm09 bash[34466]: cluster 2026-04-15T13:37:34.461139+0000 mgr.vm06.qbbldl (mgr.14229) 262 : cluster [DBG] pgmap v127: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:37:36.013 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:35 vm06 bash[28114]: cluster 2026-04-15T13:37:34.461139+0000 mgr.vm06.qbbldl (mgr.14229) 262 : cluster [DBG] pgmap v127: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:37:37.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:37 vm09 bash[34466]: cluster 2026-04-15T13:37:36.461513+0000 mgr.vm06.qbbldl (mgr.14229) 263 : cluster [DBG] pgmap v128: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:37:38.013 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:37 vm06 bash[28114]: cluster 2026-04-15T13:37:36.461513+0000 mgr.vm06.qbbldl (mgr.14229) 263 : cluster [DBG] pgmap v128: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:37:39.013 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:38 vm06 bash[28114]: audit 2026-04-15T13:37:38.483441+0000 mon.vm06 (mon.0) 964 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:37:39.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:38 vm09 bash[34466]: audit 2026-04-15T13:37:38.483441+0000 mon.vm06 (mon.0) 964 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:37:39.268 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:37:39.448 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:37:39.448 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (79s) 47s ago 2m 95.4M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:37:39.448 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 47s ago 2m - -
2026-04-15T13:37:39.448 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (2m) 47s ago 2m 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:37:39.448 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (2m) 47s ago 2m 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:37:39.698 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:37:39.698 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:37:39.698 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:37:40.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:39 vm06 bash[28114]: cluster 2026-04-15T13:37:38.462520+0000 mgr.vm06.qbbldl (mgr.14229) 264 : cluster [DBG] pgmap v129: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:37:40.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:39 vm06 bash[28114]: audit 2026-04-15T13:37:39.247709+0000 mgr.vm06.qbbldl (mgr.14229) 265 : audit [DBG] from='client.15020 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:40.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:39 vm09 bash[34466]: cluster 2026-04-15T13:37:38.462520+0000 mgr.vm06.qbbldl (mgr.14229) 264 : cluster [DBG] pgmap v129: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:37:40.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:39 vm09 bash[34466]: audit 2026-04-15T13:37:39.247709+0000 mgr.vm06.qbbldl (mgr.14229) 265 : audit [DBG] from='client.15020 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:41.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:40 vm06 bash[28114]: audit 2026-04-15T13:37:39.444660+0000 mgr.vm06.qbbldl (mgr.14229) 266 : audit [DBG] from='client.15024 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:41.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:40 vm06 bash[28114]: audit 2026-04-15T13:37:39.696987+0000 mon.vm06 (mon.0) 965 : audit [DBG] from='client.? 192.168.123.106:0/2795681115' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:37:41.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:40 vm09 bash[34466]: audit 2026-04-15T13:37:39.444660+0000 mgr.vm06.qbbldl (mgr.14229) 266 : audit [DBG] from='client.15024 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:41.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:40 vm09 bash[34466]: audit 2026-04-15T13:37:39.696987+0000 mon.vm06 (mon.0) 965 : audit [DBG] from='client.? 192.168.123.106:0/2795681115' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:37:42.013 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:41 vm06 bash[28114]: cluster 2026-04-15T13:37:40.462941+0000 mgr.vm06.qbbldl (mgr.14229) 267 : cluster [DBG] pgmap v130: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:42.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:41 vm09 bash[34466]: cluster 2026-04-15T13:37:40.462941+0000 mgr.vm06.qbbldl (mgr.14229) 267 : cluster [DBG] pgmap v130: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:44.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:43 vm06 bash[28114]: cluster 2026-04-15T13:37:42.463232+0000 mgr.vm06.qbbldl (mgr.14229) 268 : cluster [DBG] pgmap v131: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:44.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:43 vm09 bash[34466]: cluster 2026-04-15T13:37:42.463232+0000 mgr.vm06.qbbldl (mgr.14229) 268 : cluster [DBG] pgmap v131: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:44.909 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:37:45.106 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:37:45.106 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (85s) 52s ago 2m 95.4M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:37:45.106 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 52s ago 2m - -
2026-04-15T13:37:45.106 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (2m) 53s ago 2m 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:37:45.106 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (2m) 53s ago 2m 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:37:45.342 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:37:45.342 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:37:45.342 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:37:46.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:45 vm06 bash[28114]: cluster 2026-04-15T13:37:44.463664+0000 mgr.vm06.qbbldl (mgr.14229) 269 : cluster [DBG] pgmap v132: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:46.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:45 vm06 bash[28114]: audit 2026-04-15T13:37:44.889974+0000 mgr.vm06.qbbldl (mgr.14229) 270 : audit [DBG] from='client.15032 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:46.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:45 vm06 bash[28114]: audit 2026-04-15T13:37:45.102273+0000 mgr.vm06.qbbldl (mgr.14229) 271 : audit [DBG] from='client.15036 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:46.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:45 vm06 bash[28114]: audit 2026-04-15T13:37:45.341106+0000 mon.vm06 (mon.0) 966 : audit [DBG] from='client.? 192.168.123.106:0/1171030436' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:37:46.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:45 vm09 bash[34466]: cluster 2026-04-15T13:37:44.463664+0000 mgr.vm06.qbbldl (mgr.14229) 269 : cluster [DBG] pgmap v132: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:46.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:45 vm09 bash[34466]: audit 2026-04-15T13:37:44.889974+0000 mgr.vm06.qbbldl (mgr.14229) 270 : audit [DBG] from='client.15032 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:46.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:45 vm09 bash[34466]: audit 2026-04-15T13:37:45.102273+0000 mgr.vm06.qbbldl (mgr.14229) 271 : audit [DBG] from='client.15036 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:46.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:45 vm09 bash[34466]: audit 2026-04-15T13:37:45.341106+0000 mon.vm06 (mon.0) 966 : audit [DBG] from='client.? 192.168.123.106:0/1171030436' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:37:48.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:47 vm06 bash[28114]: cluster 2026-04-15T13:37:46.464119+0000 mgr.vm06.qbbldl (mgr.14229) 272 : cluster [DBG] pgmap v133: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:48.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:47 vm09 bash[34466]: cluster 2026-04-15T13:37:46.464119+0000 mgr.vm06.qbbldl (mgr.14229) 272 : cluster [DBG] pgmap v133: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:50.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:49 vm06 bash[28114]: cluster 2026-04-15T13:37:48.464527+0000 mgr.vm06.qbbldl (mgr.14229) 273 : cluster [DBG] pgmap v134: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:49 vm09 bash[34466]: cluster 2026-04-15T13:37:48.464527+0000 mgr.vm06.qbbldl (mgr.14229) 273 : cluster [DBG] pgmap v134: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:50.575 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:37:50.771 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:37:50.771 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (91s) 58s ago 2m 95.4M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:37:50.771 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 58s ago 2m - -
2026-04-15T13:37:50.771 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (2m) 58s ago 2m 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:37:50.771 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (2m) 58s ago 2m 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:37:51.026 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:37:51.026 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:37:51.026 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:37:52.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:51 vm06 bash[28114]: cluster 2026-04-15T13:37:50.465104+0000 mgr.vm06.qbbldl (mgr.14229) 274 : cluster [DBG] pgmap v135: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:52.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:51 vm06 bash[28114]: audit 2026-04-15T13:37:50.553967+0000 mgr.vm06.qbbldl (mgr.14229) 275 : audit [DBG] from='client.15044 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:52.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:51 vm06 bash[28114]: audit 2026-04-15T13:37:50.765186+0000 mgr.vm06.qbbldl (mgr.14229) 276 : audit [DBG] from='client.15048 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:52.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:51 vm06 bash[28114]: audit 2026-04-15T13:37:51.024671+0000 mon.vm06 (mon.0) 967 : audit [DBG] from='client.? 192.168.123.106:0/3310720199' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:37:52.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:51 vm09 bash[34466]: cluster 2026-04-15T13:37:50.465104+0000 mgr.vm06.qbbldl (mgr.14229) 274 : cluster [DBG] pgmap v135: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:37:52.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:51 vm09 bash[34466]: audit 2026-04-15T13:37:50.553967+0000 mgr.vm06.qbbldl (mgr.14229) 275 : audit [DBG] from='client.15044 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:52.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:51 vm09 bash[34466]: audit 2026-04-15T13:37:50.765186+0000 mgr.vm06.qbbldl (mgr.14229) 276 : audit [DBG] from='client.15048 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:52.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:51 vm09 bash[34466]: audit 2026-04-15T13:37:51.024671+0000 mon.vm06 (mon.0) 967 : audit [DBG] from='client.? 192.168.123.106:0/3310720199' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:37:53.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:52 vm06 bash[28114]: audit 2026-04-15T13:37:52.474589+0000 mon.vm06 (mon.0) 968 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:37:53.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:52 vm09 bash[34466]: audit 2026-04-15T13:37:52.474589+0000 mon.vm06 (mon.0) 968 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:37:54.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:53 vm06 bash[28114]: cluster 2026-04-15T13:37:52.465503+0000 mgr.vm06.qbbldl (mgr.14229) 277 : cluster [DBG] pgmap v136: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:37:54.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:53 vm06 bash[28114]: audit 2026-04-15T13:37:53.483981+0000 mon.vm06 (mon.0) 969 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:37:54.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:53 vm09 bash[34466]: cluster 2026-04-15T13:37:52.465503+0000 mgr.vm06.qbbldl (mgr.14229) 277 : cluster [DBG] pgmap v136: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:37:54.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:53 vm09 bash[34466]: audit 2026-04-15T13:37:53.483981+0000 mon.vm06 (mon.0) 969 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:37:56.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:55 vm06 bash[28114]: cluster 2026-04-15T13:37:54.466015+0000 mgr.vm06.qbbldl (mgr.14229) 278 : cluster [DBG] pgmap v137: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:37:56.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:55 vm09 bash[34466]: cluster 2026-04-15T13:37:54.466015+0000 mgr.vm06.qbbldl (mgr.14229) 278 : cluster [DBG] pgmap v137: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:37:56.246 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:37:56.440 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:37:56.440 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (96s) 64s ago 2m 95.4M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:37:56.440 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 64s ago 2m - -
2026-04-15T13:37:56.440 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (2m) 64s ago 2m 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:37:56.440 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (2m) 64s ago 2m 101M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:37:56.692 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:37:56.692 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:37:56.692 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:37:57.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:56 vm06 bash[28114]: audit 2026-04-15T13:37:56.224644+0000 mgr.vm06.qbbldl (mgr.14229) 279 : audit [DBG] from='client.15056 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:57.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:56 vm06 bash[28114]: audit 2026-04-15T13:37:56.691004+0000 mon.vm06 (mon.0) 970 : audit [DBG] from='client.? 192.168.123.106:0/2281827220' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:37:57.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:56 vm09 bash[34466]: audit 2026-04-15T13:37:56.224644+0000 mgr.vm06.qbbldl (mgr.14229) 279 : audit [DBG] from='client.15056 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:57.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:56 vm09 bash[34466]: audit 2026-04-15T13:37:56.691004+0000 mon.vm06 (mon.0) 970 : audit [DBG] from='client.? 192.168.123.106:0/2281827220' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:37:58.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:58 vm06 bash[28114]: audit 2026-04-15T13:37:56.434655+0000 mgr.vm06.qbbldl (mgr.14229) 280 : audit [DBG] from='client.15060 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:58.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:58 vm06 bash[28114]: cluster 2026-04-15T13:37:56.466668+0000 mgr.vm06.qbbldl (mgr.14229) 281 : cluster [DBG] pgmap v138: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:37:58.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:58 vm06 bash[28114]: audit 2026-04-15T13:37:57.367166+0000 mon.vm06 (mon.0) 971 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:37:58.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:58 vm06 bash[28114]: audit 2026-04-15T13:37:57.428463+0000 mon.vm06 (mon.0) 972 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:37:58.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:58 vm06 bash[28114]: audit 2026-04-15T13:37:57.968458+0000 mon.vm06 (mon.0) 973 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:37:58.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:58 vm06 bash[28114]: audit 2026-04-15T13:37:57.974861+0000 mon.vm06 (mon.0) 974 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:37:58.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:58 vm06 bash[28114]: audit 2026-04-15T13:37:58.344469+0000 mon.vm06 (mon.0) 975 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:37:58.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:58 vm06 bash[28114]: audit 2026-04-15T13:37:58.345335+0000 mon.vm06 (mon.0) 976 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:37:58.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:58 vm06 bash[28114]: audit 2026-04-15T13:37:58.351588+0000 mon.vm06 (mon.0) 977 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:37:58.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:58 vm06 bash[28114]: audit 2026-04-15T13:37:58.353588+0000 mon.vm06 (mon.0) 978 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:37:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:58 vm09 bash[34466]: audit 2026-04-15T13:37:56.434655+0000 mgr.vm06.qbbldl (mgr.14229) 280 : audit [DBG] from='client.15060 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:37:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:58 vm09 bash[34466]: cluster 2026-04-15T13:37:56.466668+0000 mgr.vm06.qbbldl (mgr.14229) 281 : cluster [DBG] pgmap v138: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:37:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:58 vm09 bash[34466]: audit 2026-04-15T13:37:57.367166+0000 mon.vm06 (mon.0) 971 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:37:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:58 vm09 bash[34466]: audit 2026-04-15T13:37:57.428463+0000 mon.vm06 (mon.0) 972 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:37:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:58 vm09 bash[34466]: audit 2026-04-15T13:37:57.968458+0000 mon.vm06 (mon.0) 973 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:37:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:58 vm09 bash[34466]: audit 2026-04-15T13:37:57.974861+0000 mon.vm06 (mon.0) 974 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:37:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:58 vm09 bash[34466]: audit 2026-04-15T13:37:58.344469+0000 mon.vm06 (mon.0) 975 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
dispatch 2026-04-15T13:37:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:58 vm09 bash[34466]: audit 2026-04-15T13:37:58.344469+0000 mon.vm06 (mon.0) 975 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:37:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:58 vm09 bash[34466]: audit 2026-04-15T13:37:58.345335+0000 mon.vm06 (mon.0) 976 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:37:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:58 vm09 bash[34466]: audit 2026-04-15T13:37:58.345335+0000 mon.vm06 (mon.0) 976 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:37:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:58 vm09 bash[34466]: audit 2026-04-15T13:37:58.351588+0000 mon.vm06 (mon.0) 977 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:37:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:58 vm09 bash[34466]: audit 2026-04-15T13:37:58.351588+0000 mon.vm06 (mon.0) 977 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:37:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:58 vm09 bash[34466]: audit 2026-04-15T13:37:58.353588+0000 mon.vm06 (mon.0) 978 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:37:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:58 vm09 bash[34466]: audit 2026-04-15T13:37:58.353588+0000 mon.vm06 (mon.0) 978 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:37:59.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:59 vm06 bash[28114]: cluster 2026-04-15T13:37:58.346734+0000 mgr.vm06.qbbldl (mgr.14229) 282 : cluster [DBG] pgmap v139: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 86 B/s rd, 172 B/s wr, 0 op/s 2026-04-15T13:37:59.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:59 vm06 bash[28114]: cluster 2026-04-15T13:37:58.346734+0000 mgr.vm06.qbbldl (mgr.14229) 282 : cluster [DBG] pgmap v139: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 86 B/s rd, 172 B/s wr, 0 op/s 2026-04-15T13:37:59.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:59 vm06 bash[28114]: cluster 2026-04-15T13:37:58.346867+0000 mgr.vm06.qbbldl (mgr.14229) 283 : cluster [DBG] pgmap v140: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 103 B/s rd, 207 B/s wr, 0 op/s 2026-04-15T13:37:59.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:37:59 vm06 bash[28114]: cluster 2026-04-15T13:37:58.346867+0000 mgr.vm06.qbbldl (mgr.14229) 283 : cluster [DBG] pgmap v140: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 103 B/s rd, 207 B/s wr, 0 op/s 2026-04-15T13:37:59.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:59 vm09 bash[34466]: cluster 2026-04-15T13:37:58.346734+0000 mgr.vm06.qbbldl (mgr.14229) 282 : cluster [DBG] pgmap v139: 129 pgs: 129 active+clean; 587 KiB data, 216 
MiB used, 160 GiB / 160 GiB avail; 86 B/s rd, 172 B/s wr, 0 op/s 2026-04-15T13:37:59.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:59 vm09 bash[34466]: cluster 2026-04-15T13:37:58.346734+0000 mgr.vm06.qbbldl (mgr.14229) 282 : cluster [DBG] pgmap v139: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 86 B/s rd, 172 B/s wr, 0 op/s 2026-04-15T13:37:59.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:59 vm09 bash[34466]: cluster 2026-04-15T13:37:58.346867+0000 mgr.vm06.qbbldl (mgr.14229) 283 : cluster [DBG] pgmap v140: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 103 B/s rd, 207 B/s wr, 0 op/s 2026-04-15T13:37:59.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:37:59 vm09 bash[34466]: cluster 2026-04-15T13:37:58.346867+0000 mgr.vm06.qbbldl (mgr.14229) 283 : cluster [DBG] pgmap v140: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 103 B/s rd, 207 B/s wr, 0 op/s 2026-04-15T13:38:01.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:01 vm06 bash[28114]: cluster 2026-04-15T13:38:00.347346+0000 mgr.vm06.qbbldl (mgr.14229) 284 : cluster [DBG] pgmap v141: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 103 B/s rd, 207 B/s wr, 0 op/s 2026-04-15T13:38:01.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:01 vm06 bash[28114]: cluster 2026-04-15T13:38:00.347346+0000 mgr.vm06.qbbldl (mgr.14229) 284 : cluster [DBG] pgmap v141: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 103 B/s rd, 207 B/s wr, 0 op/s 2026-04-15T13:38:01.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:01 vm09 bash[34466]: cluster 2026-04-15T13:38:00.347346+0000 mgr.vm06.qbbldl (mgr.14229) 284 : cluster [DBG] pgmap v141: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 103 B/s rd, 207 B/s wr, 0 op/s 2026-04-15T13:38:01.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:01 vm09 bash[34466]: cluster 2026-04-15T13:38:00.347346+0000 mgr.vm06.qbbldl (mgr.14229) 284 : cluster [DBG] pgmap v141: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 103 B/s rd, 207 B/s wr, 0 op/s 2026-04-15T13:38:01.934 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop 2026-04-15T13:38:02.132 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:38:02.132 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (102s) 4s ago 2m 100M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:38:02.133 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 4s ago 2m - - 2026-04-15T13:38:02.133 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (2m) 4s ago 2m 107M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3 2026-04-15T13:38:02.133 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (2m) 4s ago 2m 106M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:38:02.379 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:38:02.379 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:38:02.379 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state 2026-04-15T13:38:02.764 
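The cycle above is the first of a series of identical polls: the stop-wait check keeps printing "Waiting for rgw.foo.vm06.liyzhd to stop" because `ceph orch ps` reports that daemon as `error` rather than `stopped`, and `ceph health detail` raises CEPHADM_FAILED_DAEMON. A minimal sketch of the same state check against structured output instead of grepping the human-readable table; jq, and the `daemon_name`/`status_desc` field names in the JSON, are assumptions here, not part of the test:

    # Query one daemon's state as JSON rather than matching table columns.
    # Field names are assumed from typical `ceph orch ps --format json` output.
    ceph orch ps --daemon-type rgw --format json \
      | jq -r '.[] | select(.daemon_name == "rgw.foo.vm06.liyzhd") | .status_desc'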
2026-04-15T13:38:02.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:02 vm09 bash[34466]: audit 2026-04-15T13:38:02.378386+0000 mon.vm06 (mon.0) 979 : audit [DBG] from='client.? 192.168.123.106:0/2087166236' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:38:03.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:03 vm06 bash[28114]: audit 2026-04-15T13:38:01.913777+0000 mgr.vm06.qbbldl (mgr.14229) 285 : audit [DBG] from='client.15068 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:03.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:03 vm06 bash[28114]: audit 2026-04-15T13:38:02.128209+0000 mgr.vm06.qbbldl (mgr.14229) 286 : audit [DBG] from='client.15072 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:03.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:03 vm06 bash[28114]: cluster 2026-04-15T13:38:02.347775+0000 mgr.vm06.qbbldl (mgr.14229) 287 : cluster [DBG] pgmap v142: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 103 B/s rd, 207 B/s wr, 0 op/s
2026-04-15T13:38:03.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:03 vm09 bash[34466]: audit 2026-04-15T13:38:01.913777+0000 mgr.vm06.qbbldl (mgr.14229) 285 : audit [DBG] from='client.15068 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:03.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:03 vm09 bash[34466]: audit 2026-04-15T13:38:02.128209+0000 mgr.vm06.qbbldl (mgr.14229) 286 : audit [DBG] from='client.15072 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:03.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:03 vm09 bash[34466]: cluster 2026-04-15T13:38:02.347775+0000 mgr.vm06.qbbldl (mgr.14229) 287 : cluster [DBG] pgmap v142: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 103 B/s rd, 207 B/s wr, 0 op/s
2026-04-15T13:38:04.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:04 vm06 bash[28114]: cluster 2026-04-15T13:38:04.348220+0000 mgr.vm06.qbbldl (mgr.14229) 288 : cluster [DBG] pgmap v143: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:38:04.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:04 vm09 bash[34466]: cluster 2026-04-15T13:38:04.348220+0000 mgr.vm06.qbbldl (mgr.14229) 288 : cluster [DBG] pgmap v143: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:38:06.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:06 vm06 bash[28114]: cluster 2026-04-15T13:38:06.348765+0000 mgr.vm06.qbbldl (mgr.14229) 289 : cluster [DBG] pgmap v144: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:38:06.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:06 vm09 bash[34466]: cluster 2026-04-15T13:38:06.348765+0000 mgr.vm06.qbbldl (mgr.14229) 289 : cluster [DBG] pgmap v144: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:38:07.605 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:38:07.805 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:38:07.805 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (108s) 9s ago 2m 100M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:38:07.805 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 9s ago 2m - -
2026-04-15T13:38:07.805 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (2m) 10s ago 2m 107M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:38:07.805 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (2m) 10s ago 2m 106M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:38:08.046 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:38:08.046 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:38:08.046 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:38:08.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:08 vm09 bash[34466]: audit 2026-04-15T13:38:08.044927+0000 mon.vm06 (mon.0) 980 : audit [DBG] from='client.? 192.168.123.106:0/1629940559' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:38:08.514 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:08 vm06 bash[28114]: audit 2026-04-15T13:38:08.044927+0000 mon.vm06 (mon.0) 980 : audit [DBG] from='client.? 192.168.123.106:0/1629940559' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
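Each retry also issues `ceph health detail`, visible in the mon audit records as cmd={"prefix": "health", "detail": "detail"}. A minimal sketch, assuming jq is available and that the `-f json` health report keys its checks under `.checks` as in typical output, for testing the specific check code seen here instead of matching the whole HEALTH_WARN banner:

    # Succeed only if the CEPHADM_FAILED_DAEMON check is present in the
    # machine-readable health report; jq -e sets the exit status.
    if ceph health detail -f json | jq -e '.checks.CEPHADM_FAILED_DAEMON' >/dev/null; then
        echo "at least one cephadm daemon is in error state"
    fi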
2026-04-15T13:38:09.264 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:09 vm06 bash[28114]: audit 2026-04-15T13:38:07.586207+0000 mgr.vm06.qbbldl (mgr.14229) 290 : audit [DBG] from='client.15080 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:09.264 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:09 vm06 bash[28114]: audit 2026-04-15T13:38:07.800808+0000 mgr.vm06.qbbldl (mgr.14229) 291 : audit [DBG] from='client.15084 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:09.264 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:09 vm06 bash[28114]: cluster 2026-04-15T13:38:08.349247+0000 mgr.vm06.qbbldl (mgr.14229) 292 : cluster [DBG] pgmap v145: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:38:09.264 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:09 vm06 bash[28114]: audit 2026-04-15T13:38:08.484028+0000 mon.vm06 (mon.0) 981 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:38:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:09 vm09 bash[34466]: audit 2026-04-15T13:38:07.586207+0000 mgr.vm06.qbbldl (mgr.14229) 290 : audit [DBG] from='client.15080 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:09 vm09 bash[34466]: audit 2026-04-15T13:38:07.800808+0000 mgr.vm06.qbbldl (mgr.14229) 291 : audit [DBG] from='client.15084 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:09 vm09 bash[34466]: cluster 2026-04-15T13:38:08.349247+0000 mgr.vm06.qbbldl (mgr.14229) 292 : cluster [DBG] pgmap v145: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:38:09.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:09 vm09 bash[34466]: audit 2026-04-15T13:38:08.484028+0000 mon.vm06 (mon.0) 981 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:38:10.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:10 vm06 bash[28114]: cluster 2026-04-15T13:38:10.349795+0000 mgr.vm06.qbbldl (mgr.14229) 293 : cluster [DBG] pgmap v146: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:10.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:10 vm09 bash[34466]: cluster 2026-04-15T13:38:10.349795+0000 mgr.vm06.qbbldl (mgr.14229) 293 : cluster [DBG] pgmap v146: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:12.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:12 vm06 bash[28114]: cluster 2026-04-15T13:38:12.350220+0000 mgr.vm06.qbbldl (mgr.14229) 294 : cluster [DBG] pgmap v147: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:12.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:12 vm09 bash[34466]: cluster 2026-04-15T13:38:12.350220+0000 mgr.vm06.qbbldl (mgr.14229) 294 : cluster [DBG] pgmap v147: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:13.285 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:38:13.480 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:38:13.480 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (114s) 15s ago 2m 100M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:38:13.480 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 15s ago 2m - -
2026-04-15T13:38:13.480 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (2m) 16s ago 2m 107M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:38:13.480 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (2m) 16s ago 2m 106M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:38:13.740 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:38:13.740 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:38:13.740 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:38:13.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:13 vm06 bash[28114]: audit 2026-04-15T13:38:13.257082+0000 mgr.vm06.qbbldl (mgr.14229) 295 : audit [DBG] from='client.15092 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:13.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:13 vm09 bash[34466]: audit 2026-04-15T13:38:13.257082+0000 mgr.vm06.qbbldl (mgr.14229) 295 : audit [DBG] from='client.15092 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:14.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:14 vm06 bash[28114]: audit 2026-04-15T13:38:13.474006+0000 mgr.vm06.qbbldl (mgr.14229) 296 : audit [DBG] from='client.15096 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:14.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:14 vm06 bash[28114]: audit 2026-04-15T13:38:13.738734+0000 mon.vm06 (mon.0) 982 : audit [DBG] from='client.? 192.168.123.106:0/3467055678' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:38:14.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:14 vm06 bash[28114]: cluster 2026-04-15T13:38:14.350753+0000 mgr.vm06.qbbldl (mgr.14229) 297 : cluster [DBG] pgmap v148: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:14.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:14 vm09 bash[34466]: audit 2026-04-15T13:38:13.474006+0000 mgr.vm06.qbbldl (mgr.14229) 296 : audit [DBG] from='client.15096 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:14.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:14 vm09 bash[34466]: audit 2026-04-15T13:38:13.738734+0000 mon.vm06 (mon.0) 982 : audit [DBG] from='client.? 192.168.123.106:0/3467055678' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:38:14.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:14 vm09 bash[34466]: cluster 2026-04-15T13:38:14.350753+0000 mgr.vm06.qbbldl (mgr.14229) 297 : cluster [DBG] pgmap v148: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:16.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:16 vm06 bash[28114]: cluster 2026-04-15T13:38:16.351224+0000 mgr.vm06.qbbldl (mgr.14229) 298 : cluster [DBG] pgmap v149: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:16.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:16 vm09 bash[34466]: cluster 2026-04-15T13:38:16.351224+0000 mgr.vm06.qbbldl (mgr.14229) 298 : cluster [DBG] pgmap v149: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:18.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:18 vm06 bash[28114]: cluster 2026-04-15T13:38:18.351676+0000 mgr.vm06.qbbldl (mgr.14229) 299 : cluster [DBG] pgmap v150: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:18.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:18 vm09 bash[34466]: cluster 2026-04-15T13:38:18.351676+0000 mgr.vm06.qbbldl (mgr.14229) 299 : cluster [DBG] pgmap v150: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:18.955 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:38:19.158 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:38:19.158 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (119s) 21s ago 2m 100M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:38:19.158 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 21s ago 2m - -
2026-04-15T13:38:19.158 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (2m) 21s ago 2m 107M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:38:19.158 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (2m) 21s ago 2m 106M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:38:19.401 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:38:19.401 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:38:19.401 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:38:19.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:19 vm06 bash[28114]: audit 2026-04-15T13:38:18.935614+0000 mgr.vm06.qbbldl (mgr.14229) 300 : audit [DBG] from='client.15104 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:19.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:19 vm06 bash[28114]: audit 2026-04-15T13:38:19.153926+0000 mgr.vm06.qbbldl (mgr.14229) 301 : audit [DBG] from='client.15108 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:19.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:19 vm06 bash[28114]: audit 2026-04-15T13:38:19.399393+0000 mon.vm06 (mon.0) 983 : audit [DBG] from='client.? 192.168.123.106:0/281635047' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:38:19.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:19 vm09 bash[34466]: audit 2026-04-15T13:38:18.935614+0000 mgr.vm06.qbbldl (mgr.14229) 300 : audit [DBG] from='client.15104 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:19.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:19 vm09 bash[34466]: audit 2026-04-15T13:38:19.153926+0000 mgr.vm06.qbbldl (mgr.14229) 301 : audit [DBG] from='client.15108 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:19.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:19 vm09 bash[34466]: audit 2026-04-15T13:38:19.399393+0000 mon.vm06 (mon.0) 983 : audit [DBG] from='client.? 192.168.123.106:0/281635047' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:38:20.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:20 vm06 bash[28114]: cluster 2026-04-15T13:38:20.352189+0000 mgr.vm06.qbbldl (mgr.14229) 302 : cluster [DBG] pgmap v151: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:20.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:20 vm09 bash[34466]: cluster 2026-04-15T13:38:20.352189+0000 mgr.vm06.qbbldl (mgr.14229) 302 : cluster [DBG] pgmap v151: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:22.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:22 vm06 bash[28114]: cluster 2026-04-15T13:38:22.352609+0000 mgr.vm06.qbbldl (mgr.14229) 303 : cluster [DBG] pgmap v152: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:38:22.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:22 vm09 bash[34466]: cluster 2026-04-15T13:38:22.352609+0000 mgr.vm06.qbbldl (mgr.14229) 303 : cluster [DBG] pgmap v152: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:38:23.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:23 vm09 bash[34466]: audit 2026-04-15T13:38:23.484262+0000 mon.vm06 (mon.0) 984 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:38:24.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:23 vm06 bash[28114]: audit 2026-04-15T13:38:23.484262+0000 mon.vm06 (mon.0) 984 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:38:24.608 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:38:24.790 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:38:24.790 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (2m) 26s ago 2m 100M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:38:24.790 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 26s ago 2m - -
2026-04-15T13:38:24.790 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (2m) 27s ago 2m 107M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:38:24.790 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (2m) 27s ago 2m 106M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:38:24.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:24 vm09 bash[34466]: cluster 2026-04-15T13:38:24.352990+0000 mgr.vm06.qbbldl (mgr.14229) 304 : cluster [DBG] pgmap v153: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:38:25.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:24 vm06 bash[28114]: cluster 2026-04-15T13:38:24.352990+0000 mgr.vm06.qbbldl (mgr.14229) 304 : cluster [DBG] pgmap v153: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:38:25.019 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:38:25.019 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:38:25.019 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:38:25.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:25 vm09 bash[34466]: audit 2026-04-15T13:38:24.590499+0000 mgr.vm06.qbbldl (mgr.14229) 305 : audit [DBG] from='client.15116 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:25.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:25 vm09 bash[34466]: audit 2026-04-15T13:38:24.785699+0000 mgr.vm06.qbbldl (mgr.14229) 306 : audit [DBG] from='client.15120 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
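The records keep repeating the same pattern every five to six seconds: a "Waiting for rgw.foo.vm06.liyzhd to stop" probe, an `orch ps` snapshot, a health check, and the mgr/mon audit and pgmap heartbeats mirrored by both journalctl followers. When triaging a log like this one, the driver's own output can be separated from the journal mirror noise; `teuthology.log` below is a hypothetical local copy of this file:

    # Keep only the test driver's stdout, dropping the journalctl mirrors.
    grep 'INFO:teuthology.orchestra.run' teuthology.log
    # Count how many times the stop-wait probe has fired so far.
    grep -c 'Waiting for rgw.foo.vm06.liyzhd to stop' teuthology.log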
2026-04-15T13:38:25.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:25 vm09 bash[34466]: audit 2026-04-15T13:38:25.017668+0000 mon.vm06 (mon.0) 985 : audit [DBG] from='client.? 192.168.123.106:0/1322363412' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:38:26.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:25 vm06 bash[28114]: audit 2026-04-15T13:38:24.590499+0000 mgr.vm06.qbbldl (mgr.14229) 305 : audit [DBG] from='client.15116 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:26.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:25 vm06 bash[28114]: audit 2026-04-15T13:38:24.785699+0000 mgr.vm06.qbbldl (mgr.14229) 306 : audit [DBG] from='client.15120 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:26.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:25 vm06 bash[28114]: audit 2026-04-15T13:38:25.017668+0000 mon.vm06 (mon.0) 985 : audit [DBG] from='client.? 192.168.123.106:0/1322363412' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:38:26.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:26 vm09 bash[34466]: cluster 2026-04-15T13:38:26.353519+0000 mgr.vm06.qbbldl (mgr.14229) 307 : cluster [DBG] pgmap v154: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:38:27.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:26 vm06 bash[28114]: cluster 2026-04-15T13:38:26.353519+0000 mgr.vm06.qbbldl (mgr.14229) 307 : cluster [DBG] pgmap v154: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:38:28.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:28 vm06 bash[28114]: cluster 2026-04-15T13:38:28.353932+0000 mgr.vm06.qbbldl (mgr.14229) 308 : cluster [DBG] pgmap v155: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:38:28.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:28 vm09 bash[34466]: cluster 2026-04-15T13:38:28.353932+0000 mgr.vm06.qbbldl (mgr.14229) 308 : cluster [DBG] pgmap v155: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:38:30.226 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:38:30.432 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:38:30.432 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (2m) 32s ago 3m 100M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:38:30.432 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 32s ago 3m - -
2026-04-15T13:38:30.432 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 33s ago 3m 107M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:38:30.432 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (3m) 33s ago 3m 106M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:38:30.687 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:38:30.687 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:38:30.687 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:38:30.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:30 vm06 bash[28114]: audit 2026-04-15T13:38:30.207369+0000 mgr.vm06.qbbldl (mgr.14229) 309 : audit [DBG] from='client.15128 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:30.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:30 vm06 bash[28114]: cluster 2026-04-15T13:38:30.354385+0000 mgr.vm06.qbbldl (mgr.14229) 310 : cluster [DBG] pgmap v156: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:38:30.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:30 vm09 bash[34466]: audit 2026-04-15T13:38:30.207369+0000 mgr.vm06.qbbldl (mgr.14229) 309 : audit [DBG] from='client.15128 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:30.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:30 vm09 bash[34466]: cluster 2026-04-15T13:38:30.354385+0000 mgr.vm06.qbbldl (mgr.14229) 310 : cluster [DBG] pgmap v156: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:38:31.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:31 vm06 bash[28114]: audit 2026-04-15T13:38:30.425298+0000 mgr.vm06.qbbldl (mgr.14229) 311 : audit [DBG] from='client.15132 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:31.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:31 vm06 bash[28114]: audit 2026-04-15T13:38:30.685993+0000 mon.vm06 (mon.0) 986 : audit [DBG] from='client.? 192.168.123.106:0/2152551330' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
192.168.123.106:0/2152551330' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:38:31.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:31 vm06 bash[28114]: audit 2026-04-15T13:38:30.685993+0000 mon.vm06 (mon.0) 986 : audit [DBG] from='client.? 192.168.123.106:0/2152551330' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:38:31.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:31 vm09 bash[34466]: audit 2026-04-15T13:38:30.425298+0000 mgr.vm06.qbbldl (mgr.14229) 311 : audit [DBG] from='client.15132 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:38:31.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:31 vm09 bash[34466]: audit 2026-04-15T13:38:30.425298+0000 mgr.vm06.qbbldl (mgr.14229) 311 : audit [DBG] from='client.15132 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:38:31.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:31 vm09 bash[34466]: audit 2026-04-15T13:38:30.685993+0000 mon.vm06 (mon.0) 986 : audit [DBG] from='client.? 192.168.123.106:0/2152551330' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:38:31.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:31 vm09 bash[34466]: audit 2026-04-15T13:38:30.685993+0000 mon.vm06 (mon.0) 986 : audit [DBG] from='client.? 192.168.123.106:0/2152551330' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:38:32.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:32 vm06 bash[28114]: cluster 2026-04-15T13:38:32.354814+0000 mgr.vm06.qbbldl (mgr.14229) 312 : cluster [DBG] pgmap v157: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:38:32.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:32 vm06 bash[28114]: cluster 2026-04-15T13:38:32.354814+0000 mgr.vm06.qbbldl (mgr.14229) 312 : cluster [DBG] pgmap v157: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:38:32.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:32 vm09 bash[34466]: cluster 2026-04-15T13:38:32.354814+0000 mgr.vm06.qbbldl (mgr.14229) 312 : cluster [DBG] pgmap v157: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:38:32.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:32 vm09 bash[34466]: cluster 2026-04-15T13:38:32.354814+0000 mgr.vm06.qbbldl (mgr.14229) 312 : cluster [DBG] pgmap v157: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:38:34.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:34 vm06 bash[28114]: cluster 2026-04-15T13:38:34.355275+0000 mgr.vm06.qbbldl (mgr.14229) 313 : cluster [DBG] pgmap v158: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:38:34.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:34 vm06 bash[28114]: cluster 2026-04-15T13:38:34.355275+0000 mgr.vm06.qbbldl (mgr.14229) 313 : cluster [DBG] pgmap v158: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:38:34.858 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:34 vm09 bash[34466]: cluster 2026-04-15T13:38:34.355275+0000 mgr.vm06.qbbldl (mgr.14229) 313 : cluster [DBG] pgmap v158: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:38:34.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:34 vm09 bash[34466]: cluster 2026-04-15T13:38:34.355275+0000 mgr.vm06.qbbldl (mgr.14229) 313 : cluster [DBG] pgmap v158: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:38:35.895 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop 2026-04-15T13:38:36.076 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:38:36.076 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (2m) 38s ago 3m 100M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:38:36.076 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 38s ago 3m - - 2026-04-15T13:38:36.076 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 38s ago 3m 107M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3 2026-04-15T13:38:36.076 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (3m) 38s ago 3m 106M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:38:36.304 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:38:36.304 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:38:36.304 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state 2026-04-15T13:38:36.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:36 vm09 bash[34466]: audit 2026-04-15T13:38:36.302941+0000 mon.vm06 (mon.0) 987 : audit [DBG] from='client.? 192.168.123.106:0/1729990883' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:38:36.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:36 vm09 bash[34466]: audit 2026-04-15T13:38:36.302941+0000 mon.vm06 (mon.0) 987 : audit [DBG] from='client.? 192.168.123.106:0/1729990883' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:38:36.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:36 vm06 bash[28114]: audit 2026-04-15T13:38:36.302941+0000 mon.vm06 (mon.0) 987 : audit [DBG] from='client.? 192.168.123.106:0/1729990883' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:38:36.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:36 vm06 bash[28114]: audit 2026-04-15T13:38:36.302941+0000 mon.vm06 (mon.0) 987 : audit [DBG] from='client.? 
2026-04-15T13:38:37.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:37 vm06 bash[28114]: audit 2026-04-15T13:38:35.878269+0000 mgr.vm06.qbbldl (mgr.14229) 314 : audit [DBG] from='client.15140 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:37.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:37 vm06 bash[28114]: audit 2026-04-15T13:38:36.071632+0000 mgr.vm06.qbbldl (mgr.14229) 315 : audit [DBG] from='client.15144 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:37.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:37 vm06 bash[28114]: cluster 2026-04-15T13:38:36.355738+0000 mgr.vm06.qbbldl (mgr.14229) 316 : cluster [DBG] pgmap v159: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:38:37.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:37 vm09 bash[34466]: audit 2026-04-15T13:38:35.878269+0000 mgr.vm06.qbbldl (mgr.14229) 314 : audit [DBG] from='client.15140 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:37.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:37 vm09 bash[34466]: audit 2026-04-15T13:38:36.071632+0000 mgr.vm06.qbbldl (mgr.14229) 315 : audit [DBG] from='client.15144 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:37.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:37 vm09 bash[34466]: cluster 2026-04-15T13:38:36.355738+0000 mgr.vm06.qbbldl (mgr.14229) 316 : cluster [DBG] pgmap v159: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:38:39.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:39 vm06 bash[28114]: cluster 2026-04-15T13:38:38.356206+0000 mgr.vm06.qbbldl (mgr.14229) 317 : cluster [DBG] pgmap v160: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:38:39.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:39 vm06 bash[28114]: audit 2026-04-15T13:38:38.484398+0000 mon.vm06 (mon.0) 988 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:38:39.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:39 vm09 bash[34466]: cluster 2026-04-15T13:38:38.356206+0000 mgr.vm06.qbbldl (mgr.14229) 317 : cluster [DBG] pgmap v160: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:38:39.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:39 vm09 bash[34466]: audit 2026-04-15T13:38:38.484398+0000 mon.vm06 (mon.0) 988 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:38:41.530 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:38:41.729 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:38:41.729 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (2m) 43s ago 3m 100M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:38:41.729 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 43s ago 3m - -
2026-04-15T13:38:41.729 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 44s ago 3m 107M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:38:41.729 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (3m) 44s ago 3m 106M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:38:41.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:41 vm06 bash[28114]: cluster 2026-04-15T13:38:40.356737+0000 mgr.vm06.qbbldl (mgr.14229) 318 : cluster [DBG] pgmap v161: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:41.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:41 vm09 bash[34466]: cluster 2026-04-15T13:38:40.356737+0000 mgr.vm06.qbbldl (mgr.14229) 318 : cluster [DBG] pgmap v161: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:41.968 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:38:41.968 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:38:41.968 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:38:42.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:42 vm06 bash[28114]: audit 2026-04-15T13:38:41.966829+0000 mon.vm06 (mon.0) 989 : audit [DBG] from='client.? 192.168.123.106:0/4218481222' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:38:42.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:42 vm09 bash[34466]: audit 2026-04-15T13:38:41.966829+0000 mon.vm06 (mon.0) 989 : audit [DBG] from='client.? 192.168.123.106:0/4218481222' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
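Note the STATUS column in the tables above: rgw.foo.vm06.liyzhd is reported as "error", not "stopped", so any wait that greps the daemon's row for "stopped" can never succeed and will simply re-poll until its timeout fires. Reading the STATUS field explicitly makes that failure mode visible. A sketch, assuming the column layout from the header row (NAME HOST PORTS STATUS ...):

    # Sketch: print one daemon's STATUS field rather than grepping the whole row.
    # Field positions are assumed from the plain-text table header above.
    ceph orch ps --daemon-type rgw | awk '$1 == "rgw.foo.vm06.liyzhd" { print $4 }'
    # prints: error

ceph orch ps also accepts --format json, which avoids fragile column parsing altogether.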
2026-04-15T13:38:43.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:43 vm06 bash[28114]: audit 2026-04-15T13:38:41.509599+0000 mgr.vm06.qbbldl (mgr.14229) 319 : audit [DBG] from='client.24739 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:43.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:43 vm06 bash[28114]: audit 2026-04-15T13:38:41.724473+0000 mgr.vm06.qbbldl (mgr.14229) 320 : audit [DBG] from='client.15154 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:43.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:43 vm06 bash[28114]: cluster 2026-04-15T13:38:42.357094+0000 mgr.vm06.qbbldl (mgr.14229) 321 : cluster [DBG] pgmap v162: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:43.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:43 vm09 bash[34466]: audit 2026-04-15T13:38:41.509599+0000 mgr.vm06.qbbldl (mgr.14229) 319 : audit [DBG] from='client.24739 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:43.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:43 vm09 bash[34466]: audit 2026-04-15T13:38:41.724473+0000 mgr.vm06.qbbldl (mgr.14229) 320 : audit [DBG] from='client.15154 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:43.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:43 vm09 bash[34466]: cluster 2026-04-15T13:38:42.357094+0000 mgr.vm06.qbbldl (mgr.14229) 321 : cluster [DBG] pgmap v162: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:45.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:45 vm06 bash[28114]: cluster 2026-04-15T13:38:44.357557+0000 mgr.vm06.qbbldl (mgr.14229) 322 : cluster [DBG] pgmap v163: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:45.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:45 vm09 bash[34466]: cluster 2026-04-15T13:38:44.357557+0000 mgr.vm06.qbbldl (mgr.14229) 322 : cluster [DBG] pgmap v163: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:46.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:46 vm06 bash[28114]: cluster 2026-04-15T13:38:46.358024+0000 mgr.vm06.qbbldl (mgr.14229) 323 : cluster [DBG] pgmap v164: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:46.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:46 vm09 bash[34466]: cluster 2026-04-15T13:38:46.358024+0000 mgr.vm06.qbbldl (mgr.14229) 323 : cluster [DBG] pgmap v164: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:47.190 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:38:47.386 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:38:47.387 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (2m) 49s ago 3m 100M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:38:47.387 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 49s ago 3m - -
2026-04-15T13:38:47.387 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 50s ago 3m 107M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:38:47.387 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (3m) 50s ago 3m 106M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:38:47.638 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:38:47.639 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:38:47.639 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:38:47.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:47 vm06 bash[28114]: audit 2026-04-15T13:38:47.169432+0000 mgr.vm06.qbbldl (mgr.14229) 324 : audit [DBG] from='client.15162 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:47.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:47 vm06 bash[28114]: audit 2026-04-15T13:38:47.381382+0000 mgr.vm06.qbbldl (mgr.14229) 325 : audit [DBG] from='client.15166 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:47.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:47 vm09 bash[34466]: audit 2026-04-15T13:38:47.169432+0000 mgr.vm06.qbbldl (mgr.14229) 324 : audit [DBG] from='client.15162 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:47.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:47 vm09 bash[34466]: audit 2026-04-15T13:38:47.381382+0000 mgr.vm06.qbbldl (mgr.14229) 325 : audit [DBG] from='client.15166 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:48.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:48 vm06 bash[28114]: audit 2026-04-15T13:38:47.637049+0000 mon.vm06 (mon.0) 990 : audit [DBG] from='client.? 192.168.123.106:0/3520984184' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
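The HEALTH_WARN block repeats verbatim on every cycle because ceph health detail is re-run each time. The same CEPHADM_FAILED_DAEMON check can be read in machine-readable form; a sketch, where the jq path reflects the usual layout of the JSON health report and should be treated as an assumption:

    # Sketch: pull the CEPHADM_FAILED_DAEMON summary out of the JSON health report.
    ceph health detail --format json \
      | jq -r '.checks.CEPHADM_FAILED_DAEMON.summary.message'
    # expected here: 1 failed cephadm daemon(s)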
2026-04-15T13:38:48.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:48 vm06 bash[28114]: cluster 2026-04-15T13:38:48.358490+0000 mgr.vm06.qbbldl (mgr.14229) 326 : cluster [DBG] pgmap v165: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:48.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:48 vm09 bash[34466]: audit 2026-04-15T13:38:47.637049+0000 mon.vm06 (mon.0) 990 : audit [DBG] from='client.? 192.168.123.106:0/3520984184' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:38:48.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:48 vm09 bash[34466]: cluster 2026-04-15T13:38:48.358490+0000 mgr.vm06.qbbldl (mgr.14229) 326 : cluster [DBG] pgmap v165: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:50.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:50 vm06 bash[28114]: cluster 2026-04-15T13:38:50.358855+0000 mgr.vm06.qbbldl (mgr.14229) 327 : cluster [DBG] pgmap v166: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:50.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:50 vm09 bash[34466]: cluster 2026-04-15T13:38:50.358855+0000 mgr.vm06.qbbldl (mgr.14229) 327 : cluster [DBG] pgmap v166: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:38:52.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:52 vm06 bash[28114]: cluster 2026-04-15T13:38:52.359307+0000 mgr.vm06.qbbldl (mgr.14229) 328 : cluster [DBG] pgmap v167: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:38:52.857 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:38:52.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:52 vm09 bash[34466]: cluster 2026-04-15T13:38:52.359307+0000 mgr.vm06.qbbldl (mgr.14229) 328 : cluster [DBG] pgmap v167: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:38:53.048 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:38:53.048 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (2m) 55s ago 3m 100M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:38:53.048 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 55s ago 3m - -
2026-04-15T13:38:53.048 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 55s ago 3m 107M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:38:53.048 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (3m) 55s ago 3m 106M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:38:53.292 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:38:53.292 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:38:53.292 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:38:53.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:53 vm06 bash[28114]: audit 2026-04-15T13:38:52.838152+0000 mgr.vm06.qbbldl (mgr.14229) 329 : audit [DBG] from='client.15174 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:53.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:53 vm06 bash[28114]: audit 2026-04-15T13:38:53.043638+0000 mgr.vm06.qbbldl (mgr.14229) 330 : audit [DBG] from='client.15178 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:53.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:53 vm06 bash[28114]: audit 2026-04-15T13:38:53.290406+0000 mon.vm06 (mon.0) 991 : audit [DBG] from='client.? 192.168.123.106:0/1633144500' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:38:53.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:53 vm09 bash[34466]: audit 2026-04-15T13:38:52.838152+0000 mgr.vm06.qbbldl (mgr.14229) 329 : audit [DBG] from='client.15174 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:53.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:53 vm09 bash[34466]: audit 2026-04-15T13:38:53.043638+0000 mgr.vm06.qbbldl (mgr.14229) 330 : audit [DBG] from='client.15178 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:53.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:53 vm09 bash[34466]: audit 2026-04-15T13:38:53.290406+0000 mon.vm06 (mon.0) 991 : audit [DBG] from='client.? 192.168.123.106:0/1633144500' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:38:54.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:54 vm06 bash[28114]: audit 2026-04-15T13:38:53.484619+0000 mon.vm06 (mon.0) 992 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:38:54.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:54 vm06 bash[28114]: cluster 2026-04-15T13:38:54.360085+0000 mgr.vm06.qbbldl (mgr.14229) 331 : cluster [DBG] pgmap v168: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:38:54.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:54 vm09 bash[34466]: audit 2026-04-15T13:38:53.484619+0000 mon.vm06 (mon.0) 992 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:38:54.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:54 vm09 bash[34466]: cluster 2026-04-15T13:38:54.360085+0000 mgr.vm06.qbbldl (mgr.14229) 331 : cluster [DBG] pgmap v168: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:38:57.014 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:56 vm06 bash[28114]: cluster 2026-04-15T13:38:56.360586+0000 mgr.vm06.qbbldl (mgr.14229) 332 : cluster [DBG] pgmap v169: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:38:57.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:56 vm09 bash[34466]: cluster 2026-04-15T13:38:56.360586+0000 mgr.vm06.qbbldl (mgr.14229) 332 : cluster [DBG] pgmap v169: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
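Throughout this whole stretch the pgmap lines show a healthy data path (129 pgs, all active+clean); only the one rgw daemon is failing. The natural next diagnostic step would be that daemon's own logs on its host. A sketch of the usual cephadm-side commands, run on vm06, with illustrative journalctl arguments:

    # Sketch: inspect why rgw.foo.vm06.liyzhd sits in error state (run on vm06).
    cephadm logs --name rgw.foo.vm06.liyzhd -- -n 50
    # and recent orchestrator events from the cluster log, from any admin host:
    ceph log last 50 info cephadm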
2026-04-15T13:38:58.507 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:38:58.713 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:38:58.713 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (2m) 60s ago 3m 100M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:38:58.713 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 60s ago 3m - -
2026-04-15T13:38:58.713 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 61s ago 3m 107M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:38:58.713 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (3m) 61s ago 3m 106M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:38:58.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:58 vm06 bash[28114]: cluster 2026-04-15T13:38:58.360987+0000 mgr.vm06.qbbldl (mgr.14229) 333 : cluster [DBG] pgmap v170: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:38:58.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:58 vm06 bash[28114]: audit 2026-04-15T13:38:58.373832+0000 mon.vm06 (mon.0) 993 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:38:58.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:58 vm09 bash[34466]: cluster 2026-04-15T13:38:58.360987+0000 mgr.vm06.qbbldl (mgr.14229) 333 : cluster [DBG] pgmap v170: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:38:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:58 vm09 bash[34466]: audit 2026-04-15T13:38:58.373832+0000 mon.vm06 (mon.0) 993 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
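In the table above the REFRESHED column has climbed to about a minute: ceph orch ps serves cached daemon state between cephadm's periodic host refreshes, so a real status change can lag the report by up to that interval. When polling, it can help to force a re-poll before re-reading; a sketch:

    # Sketch: ask the orchestrator to refresh daemon state before re-reading it.
    ceph orch ps --daemon-type rgw --refresh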
2026-04-15T13:38:58.969 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:38:58.969 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:38:58.969 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:38:59.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:59 vm06 bash[28114]: audit 2026-04-15T13:38:58.487868+0000 mgr.vm06.qbbldl (mgr.14229) 334 : audit [DBG] from='client.15186 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:59.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:59 vm06 bash[28114]: audit 2026-04-15T13:38:58.706905+0000 mgr.vm06.qbbldl (mgr.14229) 335 : audit [DBG] from='client.15190 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:59.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:38:59 vm06 bash[28114]: audit 2026-04-15T13:38:58.967460+0000 mon.vm06 (mon.0) 994 : audit [DBG] from='client.? 192.168.123.106:0/3345772879' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:38:59.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:59 vm09 bash[34466]: audit 2026-04-15T13:38:58.487868+0000 mgr.vm06.qbbldl (mgr.14229) 334 : audit [DBG] from='client.15186 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:59.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:59 vm09 bash[34466]: audit 2026-04-15T13:38:58.706905+0000 mgr.vm06.qbbldl (mgr.14229) 335 : audit [DBG] from='client.15190 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:38:59.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:38:59 vm09 bash[34466]: audit 2026-04-15T13:38:58.967460+0000 mon.vm06 (mon.0) 994 : audit [DBG] from='client.? 192.168.123.106:0/3345772879' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:39:00.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:00 vm06 bash[28114]: cluster 2026-04-15T13:39:00.361464+0000 mgr.vm06.qbbldl (mgr.14229) 336 : cluster [DBG] pgmap v171: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:39:00.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:00 vm09 bash[34466]: cluster 2026-04-15T13:39:00.361464+0000 mgr.vm06.qbbldl (mgr.14229) 336 : cluster [DBG] pgmap v171: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:39:02.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:02 vm06 bash[28114]: cluster 2026-04-15T13:39:02.361941+0000 mgr.vm06.qbbldl (mgr.14229) 337 : cluster [DBG] pgmap v172: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:39:02.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:02 vm09 bash[34466]: cluster 2026-04-15T13:39:02.361941+0000 mgr.vm06.qbbldl (mgr.14229) 337 : cluster [DBG] pgmap v172: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:39:04.198 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:39:04.392 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:39:04.392 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (2m) 0s ago 3m 105M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:39:04.392 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 0s ago 3m - -
2026-04-15T13:39:04.392 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 1s ago 3m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:39:04.392 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (3m) 1s ago 3m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:39:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:04 vm09 bash[34466]: audit 2026-04-15T13:39:03.234013+0000 mon.vm06 (mon.0) 995 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:39:04.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:04 vm09 bash[34466]: audit 2026-04-15T13:39:03.239063+0000 mon.vm06 (mon.0) 996 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:39:04.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:04 vm09 bash[34466]: audit 2026-04-15T13:39:03.827754+0000 mon.vm06 (mon.0) 997 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:39:04.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:04 vm09 bash[34466]: audit 2026-04-15T13:39:03.833365+0000 mon.vm06 (mon.0) 998 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:39:04.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:04 vm09 bash[34466]: audit 2026-04-15T13:39:04.188641+0000 mon.vm06 (mon.0) 999 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:39:04.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:04 vm09 bash[34466]: audit 2026-04-15T13:39:04.189587+0000 mon.vm06 (mon.0) 1000 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:39:04.633 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:39:04.633 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:39:04.633 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:39:04.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:04 vm06 bash[28114]: audit 2026-04-15T13:39:03.234013+0000 mon.vm06 (mon.0) 995 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
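The audit entries 999 and 1000 above (config generate-minimal-conf and auth get client.admin) are the mon commands cephadm issues while regenerating a daemon's config and keyring, here presumably as part of its housekeeping around the failed rgw. Both can be run by hand to see exactly what cephadm distributes; a sketch:

    # Sketch: the two mon commands audited above, run manually from an admin host.
    ceph config generate-minimal-conf    # minimal ceph.conf handed to managed daemons
    ceph auth get client.admin           # keyring cephadm places on admin hosts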
2026-04-15T13:39:04.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:04 vm06 bash[28114]: audit 2026-04-15T13:39:03.239063+0000 mon.vm06 (mon.0) 996 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:39:04.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:04 vm06 bash[28114]: audit 2026-04-15T13:39:03.827754+0000 mon.vm06 (mon.0) 997 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:39:04.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:04 vm06 bash[28114]: audit 2026-04-15T13:39:03.833365+0000 mon.vm06 (mon.0) 998 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:39:04.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:04 vm06 bash[28114]: audit 2026-04-15T13:39:04.188641+0000 mon.vm06 (mon.0) 999 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:39:04.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:04 vm06 bash[28114]: audit 2026-04-15T13:39:04.189587+0000 mon.vm06 (mon.0) 1000 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:39:05.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:05 vm09 bash[34466]: audit 2026-04-15T13:39:04.166754+0000 mgr.vm06.qbbldl (mgr.14229) 338 : audit [DBG] from='client.15198 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:05.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:05 vm09 bash[34466]: cluster 2026-04-15T13:39:04.190798+0000 mgr.vm06.qbbldl (mgr.14229) 339 : cluster [DBG] pgmap v173: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 86 B/s rd, 173 B/s wr, 0 op/s
2026-04-15T13:39:05.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:05 vm09 bash[34466]: cluster 2026-04-15T13:39:04.190936+0000 mgr.vm06.qbbldl (mgr.14229) 340 : cluster [DBG] pgmap v174: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:39:05.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:05 vm09 bash[34466]: audit 2026-04-15T13:39:04.257981+0000 mon.vm06 (mon.0) 1001 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:39:05.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:05 vm09 bash[34466]: audit 2026-04-15T13:39:04.260354+0000 mon.vm06 (mon.0) 1002 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:39:05.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:05 vm09 bash[34466]: audit 2026-04-15T13:39:04.387580+0000 mgr.vm06.qbbldl (mgr.14229) 341 : audit [DBG] from='client.15202 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:05.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:05 vm09 bash[34466]: audit 2026-04-15T13:39:04.631591+0000 mon.vm06 (mon.0) 1003 : audit [DBG] from='client.? 192.168.123.106:0/815514355' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:39:05.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:05 vm06 bash[28114]: audit 2026-04-15T13:39:04.166754+0000 mgr.vm06.qbbldl (mgr.14229) 338 : audit [DBG] from='client.15198 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:05.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:05 vm06 bash[28114]: cluster 2026-04-15T13:39:04.190798+0000 mgr.vm06.qbbldl (mgr.14229) 339 : cluster [DBG] pgmap v173: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 86 B/s rd, 173 B/s wr, 0 op/s
2026-04-15T13:39:05.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:05 vm06 bash[28114]: cluster 2026-04-15T13:39:04.190936+0000 mgr.vm06.qbbldl (mgr.14229) 340 : cluster [DBG] pgmap v174: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:39:05.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:05 vm06 bash[28114]: audit 2026-04-15T13:39:04.257981+0000 mon.vm06 (mon.0) 1001 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:39:05.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:05 vm06 bash[28114]: audit 2026-04-15T13:39:04.260354+0000 mon.vm06 (mon.0) 1002 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:39:05.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:05 vm06 bash[28114]: audit 2026-04-15T13:39:04.387580+0000 mgr.vm06.qbbldl (mgr.14229) 341 : audit [DBG] from='client.15202 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:05.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:05 vm06 bash[28114]: audit 2026-04-15T13:39:04.631591+0000 mon.vm06 (mon.0) 1003 : audit [DBG] from='client.? 192.168.123.106:0/815514355' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:39:07.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:07 vm09 bash[34466]: cluster 2026-04-15T13:39:06.191439+0000 mgr.vm06.qbbldl (mgr.14229) 342 : cluster [DBG] pgmap v175: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:39:07.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:07 vm06 bash[28114]: cluster 2026-04-15T13:39:06.191439+0000 mgr.vm06.qbbldl (mgr.14229) 342 : cluster [DBG] pgmap v175: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:39:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:09 vm09 bash[34466]: cluster 2026-04-15T13:39:08.191883+0000 mgr.vm06.qbbldl (mgr.14229) 343 : cluster [DBG] pgmap v176: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:39:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:09 vm09 bash[34466]: audit 2026-04-15T13:39:08.484859+0000 mon.vm06 (mon.0) 1004 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:39:09.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:09 vm06 bash[28114]: cluster 2026-04-15T13:39:08.191883+0000 mgr.vm06.qbbldl (mgr.14229) 343 : cluster [DBG] pgmap v176: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:39:09.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:09 vm06 bash[28114]: audit 2026-04-15T13:39:08.484859+0000 mon.vm06 (mon.0) 1004 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:39:09.850 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:39:10.040 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:39:10.040 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (2m) 6s ago 3m 105M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:39:10.040 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 6s ago 3m - -
2026-04-15T13:39:10.040 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 6s ago 3m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:39:10.040 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (3m) 6s ago 3m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:39:10.272 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:39:10.272 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:39:10.272 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:39:10.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:10 vm09 bash[34466]: audit 2026-04-15T13:39:10.270957+0000 mon.vm06 (mon.0) 1005 : audit [DBG] from='client.? 192.168.123.106:0/4200353741' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:39:10.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:10 vm06 bash[28114]: audit 2026-04-15T13:39:10.270957+0000 mon.vm06 (mon.0) 1005 : audit [DBG] from='client.? 192.168.123.106:0/4200353741' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:39:11.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:11 vm09 bash[34466]: audit 2026-04-15T13:39:09.830814+0000 mgr.vm06.qbbldl (mgr.14229) 344 : audit [DBG] from='client.15210 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:11.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:11 vm09 bash[34466]: audit 2026-04-15T13:39:10.035741+0000 mgr.vm06.qbbldl (mgr.14229) 345 : audit [DBG] from='client.24789 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:11.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:11 vm09 bash[34466]: cluster 2026-04-15T13:39:10.192398+0000 mgr.vm06.qbbldl (mgr.14229) 346 : cluster [DBG] pgmap v177: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 208 B/s rd, 416 B/s wr, 0 op/s
2026-04-15T13:39:11.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:11 vm06 bash[28114]: audit 2026-04-15T13:39:09.830814+0000 mgr.vm06.qbbldl (mgr.14229) 344 : audit [DBG] from='client.15210 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:11.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:11 vm06 bash[28114]: audit 2026-04-15T13:39:10.035741+0000 mgr.vm06.qbbldl (mgr.14229) 345 : audit [DBG] from='client.24789 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:11.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:11 vm06 bash[28114]: cluster 2026-04-15T13:39:10.192398+0000 mgr.vm06.qbbldl (mgr.14229) 346 : cluster [DBG] pgmap v177: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 208 B/s rd, 416 B/s wr, 0 op/s
2026-04-15T13:39:13.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:13 vm09 bash[34466]: cluster 2026-04-15T13:39:12.192734+0000 mgr.vm06.qbbldl (mgr.14229) 347 : cluster [DBG] pgmap v178: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 208 B/s rd, 416 B/s wr, 0 op/s
2026-04-15T13:39:13.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:13 vm06 bash[28114]: cluster 2026-04-15T13:39:12.192734+0000 mgr.vm06.qbbldl (mgr.14229) 347 : cluster [DBG] pgmap v178: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 208 B/s rd, 416 B/s wr, 0 op/s
2026-04-15T13:39:15.479 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:39:15.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:15 vm09 bash[34466]: cluster 2026-04-15T13:39:14.193198+0000 mgr.vm06.qbbldl (mgr.14229) 348 : cluster [DBG] pgmap v179: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s
2026-04-15T13:39:15.669 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:39:15.669 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (2m) 11s ago 3m 105M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:39:15.669 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 11s ago 3m - -
2026-04-15T13:39:15.669 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 12s ago 3m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:39:15.669 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (3m) 12s ago 3m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:39:15.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:15 vm06 bash[28114]: cluster 2026-04-15T13:39:14.193198+0000 mgr.vm06.qbbldl (mgr.14229) 348 : cluster [DBG] pgmap v179: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s
2026-04-15T13:39:15.906 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:39:15.906 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:39:15.906 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:39:16.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:16 vm09 bash[34466]: audit 2026-04-15T13:39:15.904232+0000 mon.vm06 (mon.0) 1006 : audit [DBG] from='client.? 192.168.123.106:0/1131817256' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:39:16.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:16 vm06 bash[28114]: audit 2026-04-15T13:39:15.904232+0000 mon.vm06 (mon.0) 1006 : audit [DBG] from='client.? 192.168.123.106:0/1131817256' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:39:17.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:17 vm09 bash[34466]: audit 2026-04-15T13:39:15.458462+0000 mgr.vm06.qbbldl (mgr.14229) 349 : audit [DBG] from='client.15222 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:17.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:17 vm09 bash[34466]: audit 2026-04-15T13:39:15.664440+0000 mgr.vm06.qbbldl (mgr.14229) 350 : audit [DBG] from='client.15226 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:17.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:17 vm09 bash[34466]: cluster 2026-04-15T13:39:16.193652+0000 mgr.vm06.qbbldl (mgr.14229) 351 : cluster [DBG] pgmap v180: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:39:18.015 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:17 vm06 bash[28114]: audit 2026-04-15T13:39:15.458462+0000 mgr.vm06.qbbldl (mgr.14229) 349 : audit [DBG] from='client.15222 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:18.015 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:17 vm06 bash[28114]: audit 2026-04-15T13:39:15.664440+0000 mgr.vm06.qbbldl (mgr.14229) 350 : audit [DBG] from='client.15226 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:18.015 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:17 vm06 bash[28114]: cluster 2026-04-15T13:39:16.193652+0000 mgr.vm06.qbbldl (mgr.14229) 351 : cluster [DBG] pgmap v180: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:39:18.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:18 vm09 bash[34466]: cluster 2026-04-15T13:39:18.194088+0000 mgr.vm06.qbbldl (mgr.14229) 352 : cluster [DBG] pgmap v181: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:39:18.886 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:18 vm06 bash[28114]: cluster 2026-04-15T13:39:18.194088+0000 mgr.vm06.qbbldl (mgr.14229) 352 : cluster [DBG] pgmap v181: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:39:21.128 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:39:21.315 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:39:21.315 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (3m) 17s ago 3m 105M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:39:21.315 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 17s ago 3m - -
2026-04-15T13:39:21.315 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 18s ago 3m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:39:21.315 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (3m) 18s ago 3m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:39:21.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:21 vm06 bash[28114]: cluster 2026-04-15T13:39:20.194582+0000 mgr.vm06.qbbldl (mgr.14229) 353 : cluster [DBG] pgmap v182: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:39:21.544 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:39:21.544 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:39:21.544 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:39:21.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:21 vm09 bash[34466]: cluster 2026-04-15T13:39:20.194582+0000 mgr.vm06.qbbldl (mgr.14229) 353 : cluster [DBG] pgmap v182: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:39:22.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:22 vm06 bash[28114]: audit 2026-04-15T13:39:21.108204+0000 mgr.vm06.qbbldl (mgr.14229) 354 : audit [DBG] from='client.15234 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:22.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:22 vm06 bash[28114]: audit 2026-04-15T13:39:21.310840+0000 mgr.vm06.qbbldl (mgr.14229) 355 : audit [DBG] from='client.15238 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:22.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:22 vm06 bash[28114]: audit 2026-04-15T13:39:21.542090+0000 mon.vm06 (mon.0) 1007 : audit [DBG] from='client.? 192.168.123.106:0/4123593294' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:39:22.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:22 vm09 bash[34466]: audit 2026-04-15T13:39:21.108204+0000 mgr.vm06.qbbldl (mgr.14229) 354 : audit [DBG] from='client.15234 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:22.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:22 vm09 bash[34466]: audit 2026-04-15T13:39:21.310840+0000 mgr.vm06.qbbldl (mgr.14229) 355 : audit [DBG] from='client.15238 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:22.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:22 vm09 bash[34466]: audit 2026-04-15T13:39:21.542090+0000 mon.vm06 (mon.0) 1007 : audit [DBG] from='client.? 192.168.123.106:0/4123593294' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:39:23.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:23 vm09 bash[34466]: cluster 2026-04-15T13:39:22.195001+0000 mgr.vm06.qbbldl (mgr.14229) 356 : cluster [DBG] pgmap v183: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:39:23.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:23 vm06 bash[28114]: cluster 2026-04-15T13:39:22.195001+0000 mgr.vm06.qbbldl (mgr.14229) 356 : cluster [DBG] pgmap v183: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:39:24.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:24 vm09 bash[34466]: audit 2026-04-15T13:39:23.485339+0000 mon.vm06 (mon.0) 1008 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:39:24.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:24 vm06 bash[28114]: audit 2026-04-15T13:39:23.485339+0000 mon.vm06 (mon.0) 1008 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:39:25.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:25 vm09 bash[34466]: cluster 2026-04-15T13:39:24.195460+0000 mgr.vm06.qbbldl (mgr.14229) 357 : cluster [DBG] pgmap v184: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:39:25.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:25 vm06 bash[28114]: cluster 2026-04-15T13:39:24.195460+0000 mgr.vm06.qbbldl (mgr.14229) 357 : cluster [DBG] pgmap v184: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:39:26.756 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:39:26.947 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:39:26.947 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (3m) 23s ago 3m 105M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:39:26.947 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 23s ago 3m - -
2026-04-15T13:39:26.947 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 23s ago 4m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:39:26.947 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (3m) 23s ago 3m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:39:27.175 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:39:27.175 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:39:27.175 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:39:27.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:27 vm09 bash[34466]: cluster 2026-04-15T13:39:26.195928+0000 mgr.vm06.qbbldl (mgr.14229) 358 : cluster [DBG] pgmap v185: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:39:27.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:27 vm09 bash[34466]: audit 2026-04-15T13:39:27.173461+0000 mon.vm06 (mon.0) 1009 : audit [DBG] from='client.? 192.168.123.106:0/1847017940' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:39:27.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:27 vm06 bash[28114]: cluster 2026-04-15T13:39:26.195928+0000 mgr.vm06.qbbldl (mgr.14229) 358 : cluster [DBG] pgmap v185: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:39:27.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:27 vm06 bash[28114]: audit 2026-04-15T13:39:27.173461+0000 mon.vm06 (mon.0) 1009 : audit [DBG] from='client.? 192.168.123.106:0/1847017940' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:39:28.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:28 vm09 bash[34466]: audit 2026-04-15T13:39:26.735771+0000 mgr.vm06.qbbldl (mgr.14229) 359 : audit [DBG] from='client.15246 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:28.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:28 vm09 bash[34466]: audit 2026-04-15T13:39:26.942831+0000 mgr.vm06.qbbldl (mgr.14229) 360 : audit [DBG] from='client.15250 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:28.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:28 vm06 bash[28114]: audit 2026-04-15T13:39:26.735771+0000 mgr.vm06.qbbldl (mgr.14229) 359 : audit [DBG] from='client.15246 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:28.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:28 vm06 bash[28114]: audit 2026-04-15T13:39:26.942831+0000 mgr.vm06.qbbldl (mgr.14229) 360 : audit [DBG] from='client.15250 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:29.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:29 vm09 bash[34466]: cluster 2026-04-15T13:39:28.196477+0000 mgr.vm06.qbbldl (mgr.14229) 361 : cluster [DBG] pgmap v186: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:39:29.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:29 vm06 bash[28114]: cluster 2026-04-15T13:39:28.196477+0000 mgr.vm06.qbbldl (mgr.14229) 361 : cluster [DBG] pgmap v186: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:39:31.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:31 vm09 bash[34466]: cluster 2026-04-15T13:39:30.196994+0000 mgr.vm06.qbbldl (mgr.14229) 362 : cluster [DBG] pgmap v187: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:39:31.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:31 vm06 bash[28114]: cluster 2026-04-15T13:39:30.196994+0000 mgr.vm06.qbbldl (mgr.14229) 362 : cluster [DBG] pgmap v187: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:39:32.380 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:39:32.559 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:39:32.559 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (3m) 28s ago 4m 105M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:39:32.559 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 28s ago 4m - -
2026-04-15T13:39:32.559 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 29s ago 4m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:39:32.559 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (4m) 29s ago 4m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:39:32.778 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:39:32.779 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:39:32.779 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:39:33.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:33 vm09 bash[34466]: cluster 2026-04-15T13:39:32.197422+0000 mgr.vm06.qbbldl (mgr.14229) 363 : cluster [DBG] pgmap v188: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:39:33.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:33 vm09 bash[34466]: audit 2026-04-15T13:39:32.362204+0000 mgr.vm06.qbbldl (mgr.14229) 364 : audit [DBG] from='client.15258 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:33.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:33 vm09 bash[34466]: audit 2026-04-15T13:39:32.776761+0000 mon.vm06 (mon.0) 1010 : audit [DBG] from='client.? 192.168.123.106:0/306965299' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:39:33.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:33 vm06 bash[28114]: cluster 2026-04-15T13:39:32.197422+0000 mgr.vm06.qbbldl (mgr.14229) 363 : cluster [DBG] pgmap v188: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:39:33.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:33 vm06 bash[28114]: audit 2026-04-15T13:39:32.362204+0000 mgr.vm06.qbbldl (mgr.14229) 364 : audit [DBG] from='client.15258 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:33.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:33 vm06 bash[28114]: audit 2026-04-15T13:39:32.776761+0000 mon.vm06 (mon.0) 1010 : audit [DBG] from='client.? 192.168.123.106:0/306965299' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:39:34.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:34 vm09 bash[34466]: audit 2026-04-15T13:39:32.555131+0000 mgr.vm06.qbbldl (mgr.14229) 365 : audit [DBG] from='client.15262 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:34.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:34 vm06 bash[28114]: audit 2026-04-15T13:39:32.555131+0000 mgr.vm06.qbbldl (mgr.14229) 365 : audit [DBG] from='client.15262 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:35.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:35 vm09 bash[34466]: cluster 2026-04-15T13:39:34.197869+0000 mgr.vm06.qbbldl (mgr.14229) 366 : cluster [DBG] pgmap v189: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:39:35.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:35 vm06 bash[28114]: cluster 2026-04-15T13:39:34.197869+0000 mgr.vm06.qbbldl (mgr.14229) 366 : cluster [DBG] pgmap v189: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:39:37.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:37 vm09 bash[34466]: cluster 2026-04-15T13:39:36.198322+0000 mgr.vm06.qbbldl (mgr.14229) 367 : cluster [DBG] pgmap v190: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:39:37.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:37 vm06 bash[28114]: cluster 2026-04-15T13:39:36.198322+0000 mgr.vm06.qbbldl (mgr.14229) 367 : cluster [DBG] pgmap v190: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:39:37.992 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:39:38.179 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:39:38.179 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (3m) 34s ago 4m 105M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:39:38.180 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 34s ago 4m - -
2026-04-15T13:39:38.180 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 34s ago 4m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:39:38.180 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (4m) 34s ago 4m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:39:38.409 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:39:38.409 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:39:38.409 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:39:39.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:39 vm09 bash[34466]: audit 2026-04-15T13:39:37.968671+0000 mgr.vm06.qbbldl (mgr.14229) 368 : audit [DBG] from='client.15270 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:39.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:39 vm09 bash[34466]: audit 2026-04-15T13:39:38.175118+0000 mgr.vm06.qbbldl (mgr.14229) 369 : audit [DBG] from='client.15274 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:39.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:39 vm09 bash[34466]: cluster 2026-04-15T13:39:38.198786+0000 mgr.vm06.qbbldl (mgr.14229) 370 : cluster [DBG] pgmap v191: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:39:39.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:39 vm09 bash[34466]: audit 2026-04-15T13:39:38.406876+0000 mon.vm06 (mon.0) 1011 : audit [DBG] from='client.? 192.168.123.106:0/2378947970' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:39:39.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:39 vm09 bash[34466]: audit 2026-04-15T13:39:38.485371+0000 mon.vm06 (mon.0) 1012 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:39:39.764 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:39 vm06 bash[28114]: audit 2026-04-15T13:39:37.968671+0000 mgr.vm06.qbbldl (mgr.14229) 368 : audit [DBG] from='client.15270 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:39.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:39 vm06 bash[28114]: audit 2026-04-15T13:39:38.175118+0000 mgr.vm06.qbbldl (mgr.14229) 369 : audit [DBG] from='client.15274 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:39.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:39 vm06 bash[28114]: cluster 2026-04-15T13:39:38.198786+0000 mgr.vm06.qbbldl (mgr.14229) 370 : cluster [DBG] pgmap v191: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:39:39.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:39 vm06 bash[28114]: audit 2026-04-15T13:39:38.406876+0000 mon.vm06 (mon.0) 1011 : audit [DBG] from='client.? 192.168.123.106:0/2378947970' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:39:39.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:39 vm06 bash[28114]: audit 2026-04-15T13:39:38.485371+0000 mon.vm06 (mon.0) 1012 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:39:41.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:41 vm09 bash[34466]: cluster 2026-04-15T13:39:40.199197+0000 mgr.vm06.qbbldl (mgr.14229) 371 : cluster [DBG] pgmap v192: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:39:41.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:41 vm06 bash[28114]: cluster 2026-04-15T13:39:40.199197+0000 mgr.vm06.qbbldl (mgr.14229) 371 : cluster [DBG] pgmap v192: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:39:43.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:43 vm09 bash[34466]: cluster 2026-04-15T13:39:42.199600+0000 mgr.vm06.qbbldl (mgr.14229) 372 : cluster [DBG] pgmap v193: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:39:43.635 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:39:43.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:43 vm06 bash[28114]: cluster 2026-04-15T13:39:42.199600+0000 mgr.vm06.qbbldl (mgr.14229) 372 : cluster [DBG] pgmap v193: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:39:43.820 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:39:43.820 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (3m) 39s ago 4m 105M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:39:43.820 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 39s ago 4m - -
2026-04-15T13:39:43.820 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 40s ago 4m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:39:43.820 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (4m) 40s ago 4m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:39:44.077 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:39:44.077 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:39:44.077 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:39:44.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:44 vm06 bash[28114]: audit 2026-04-15T13:39:44.075186+0000 mon.vm06 (mon.0) 1013 : audit [DBG] from='client.? 192.168.123.106:0/2290991470' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:39:44.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:44 vm09 bash[34466]: audit 2026-04-15T13:39:44.075186+0000 mon.vm06 (mon.0) 1013 : audit [DBG] from='client.? 192.168.123.106:0/2290991470' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:39:45.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:45 vm06 bash[28114]: audit 2026-04-15T13:39:43.615666+0000 mgr.vm06.qbbldl (mgr.14229) 373 : audit [DBG] from='client.15282 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:45.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:45 vm06 bash[28114]: audit 2026-04-15T13:39:43.815456+0000 mgr.vm06.qbbldl (mgr.14229) 374 : audit [DBG] from='client.15286 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:45.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:45 vm06 bash[28114]: cluster 2026-04-15T13:39:44.200018+0000 mgr.vm06.qbbldl (mgr.14229) 375 : cluster [DBG] pgmap v194: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:39:45.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:45 vm09 bash[34466]: audit 2026-04-15T13:39:43.615666+0000 mgr.vm06.qbbldl (mgr.14229) 373 : audit [DBG] from='client.15282 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:45.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:45 vm09 bash[34466]: audit 2026-04-15T13:39:43.815456+0000 mgr.vm06.qbbldl (mgr.14229) 374 : audit [DBG] from='client.15286 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:39:45.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:45 vm09 bash[34466]: cluster 2026-04-15T13:39:44.200018+0000 mgr.vm06.qbbldl (mgr.14229) 375 : cluster [DBG] pgmap v194: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:39:45.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:45 
vm09 bash[34466]: cluster 2026-04-15T13:39:44.200018+0000 mgr.vm06.qbbldl (mgr.14229) 375 : cluster [DBG] pgmap v194: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:39:47.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:47 vm06 bash[28114]: cluster 2026-04-15T13:39:46.200474+0000 mgr.vm06.qbbldl (mgr.14229) 376 : cluster [DBG] pgmap v195: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:39:47.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:47 vm06 bash[28114]: cluster 2026-04-15T13:39:46.200474+0000 mgr.vm06.qbbldl (mgr.14229) 376 : cluster [DBG] pgmap v195: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:39:47.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:47 vm09 bash[34466]: cluster 2026-04-15T13:39:46.200474+0000 mgr.vm06.qbbldl (mgr.14229) 376 : cluster [DBG] pgmap v195: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:39:47.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:47 vm09 bash[34466]: cluster 2026-04-15T13:39:46.200474+0000 mgr.vm06.qbbldl (mgr.14229) 376 : cluster [DBG] pgmap v195: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:39:49.290 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop 2026-04-15T13:39:49.488 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:39:49.488 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (3m) 45s ago 4m 105M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:39:49.488 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 45s ago 4m - - 2026-04-15T13:39:49.488 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 46s ago 4m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3 2026-04-15T13:39:49.488 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (4m) 46s ago 4m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:39:49.734 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:39:49.734 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:39:49.734 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state 2026-04-15T13:39:49.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:49 vm06 bash[28114]: cluster 2026-04-15T13:39:48.200954+0000 mgr.vm06.qbbldl (mgr.14229) 377 : cluster [DBG] pgmap v196: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:39:49.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:49 vm06 bash[28114]: cluster 2026-04-15T13:39:48.200954+0000 mgr.vm06.qbbldl (mgr.14229) 377 : cluster [DBG] pgmap v196: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:39:49.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:49 vm09 bash[34466]: cluster 2026-04-15T13:39:48.200954+0000 mgr.vm06.qbbldl (mgr.14229) 377 : cluster [DBG] pgmap v196: 129 
pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:39:49.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:49 vm09 bash[34466]: cluster 2026-04-15T13:39:48.200954+0000 mgr.vm06.qbbldl (mgr.14229) 377 : cluster [DBG] pgmap v196: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:39:50.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:50 vm06 bash[28114]: audit 2026-04-15T13:39:49.270837+0000 mgr.vm06.qbbldl (mgr.14229) 378 : audit [DBG] from='client.15294 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:39:50.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:50 vm06 bash[28114]: audit 2026-04-15T13:39:49.270837+0000 mgr.vm06.qbbldl (mgr.14229) 378 : audit [DBG] from='client.15294 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:39:50.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:50 vm06 bash[28114]: audit 2026-04-15T13:39:49.732137+0000 mon.vm06 (mon.0) 1014 : audit [DBG] from='client.? 192.168.123.106:0/2545805885' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:39:50.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:50 vm06 bash[28114]: audit 2026-04-15T13:39:49.732137+0000 mon.vm06 (mon.0) 1014 : audit [DBG] from='client.? 192.168.123.106:0/2545805885' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:39:50.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:50 vm09 bash[34466]: audit 2026-04-15T13:39:49.270837+0000 mgr.vm06.qbbldl (mgr.14229) 378 : audit [DBG] from='client.15294 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:39:50.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:50 vm09 bash[34466]: audit 2026-04-15T13:39:49.270837+0000 mgr.vm06.qbbldl (mgr.14229) 378 : audit [DBG] from='client.15294 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:39:50.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:50 vm09 bash[34466]: audit 2026-04-15T13:39:49.732137+0000 mon.vm06 (mon.0) 1014 : audit [DBG] from='client.? 192.168.123.106:0/2545805885' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:39:50.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:50 vm09 bash[34466]: audit 2026-04-15T13:39:49.732137+0000 mon.vm06 (mon.0) 1014 : audit [DBG] from='client.? 
192.168.123.106:0/2545805885' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:39:51.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:51 vm06 bash[28114]: audit 2026-04-15T13:39:49.482865+0000 mgr.vm06.qbbldl (mgr.14229) 379 : audit [DBG] from='client.15298 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:39:51.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:51 vm06 bash[28114]: audit 2026-04-15T13:39:49.482865+0000 mgr.vm06.qbbldl (mgr.14229) 379 : audit [DBG] from='client.15298 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:39:51.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:51 vm06 bash[28114]: cluster 2026-04-15T13:39:50.201571+0000 mgr.vm06.qbbldl (mgr.14229) 380 : cluster [DBG] pgmap v197: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:39:51.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:51 vm06 bash[28114]: cluster 2026-04-15T13:39:50.201571+0000 mgr.vm06.qbbldl (mgr.14229) 380 : cluster [DBG] pgmap v197: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:39:51.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:51 vm09 bash[34466]: audit 2026-04-15T13:39:49.482865+0000 mgr.vm06.qbbldl (mgr.14229) 379 : audit [DBG] from='client.15298 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:39:51.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:51 vm09 bash[34466]: audit 2026-04-15T13:39:49.482865+0000 mgr.vm06.qbbldl (mgr.14229) 379 : audit [DBG] from='client.15298 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:39:51.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:51 vm09 bash[34466]: cluster 2026-04-15T13:39:50.201571+0000 mgr.vm06.qbbldl (mgr.14229) 380 : cluster [DBG] pgmap v197: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:39:51.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:51 vm09 bash[34466]: cluster 2026-04-15T13:39:50.201571+0000 mgr.vm06.qbbldl (mgr.14229) 380 : cluster [DBG] pgmap v197: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:39:53.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:53 vm06 bash[28114]: cluster 2026-04-15T13:39:52.202045+0000 mgr.vm06.qbbldl (mgr.14229) 381 : cluster [DBG] pgmap v198: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:39:53.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:53 vm06 bash[28114]: cluster 2026-04-15T13:39:52.202045+0000 mgr.vm06.qbbldl (mgr.14229) 381 : cluster [DBG] pgmap v198: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:39:53.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:53 vm09 bash[34466]: cluster 2026-04-15T13:39:52.202045+0000 mgr.vm06.qbbldl (mgr.14229) 381 : cluster [DBG] pgmap v198: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:39:53.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:53 vm09 bash[34466]: 
cluster 2026-04-15T13:39:52.202045+0000 mgr.vm06.qbbldl (mgr.14229) 381 : cluster [DBG] pgmap v198: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:39:54.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:54 vm06 bash[28114]: audit 2026-04-15T13:39:53.485694+0000 mon.vm06 (mon.0) 1015 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:39:54.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:54 vm06 bash[28114]: audit 2026-04-15T13:39:53.485694+0000 mon.vm06 (mon.0) 1015 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:39:54.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:54 vm09 bash[34466]: audit 2026-04-15T13:39:53.485694+0000 mon.vm06 (mon.0) 1015 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:39:54.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:54 vm09 bash[34466]: audit 2026-04-15T13:39:53.485694+0000 mon.vm06 (mon.0) 1015 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:39:54.946 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop 2026-04-15T13:39:55.129 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:39:55.129 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (3m) 51s ago 4m 105M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:39:55.129 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 51s ago 4m - - 2026-04-15T13:39:55.129 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 51s ago 4m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3 2026-04-15T13:39:55.129 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (4m) 51s ago 4m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:39:55.368 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:39:55.368 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:39:55.368 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state 2026-04-15T13:39:55.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:55 vm06 bash[28114]: cluster 2026-04-15T13:39:54.202529+0000 mgr.vm06.qbbldl (mgr.14229) 382 : cluster [DBG] pgmap v199: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:39:55.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:55 vm06 bash[28114]: cluster 2026-04-15T13:39:54.202529+0000 mgr.vm06.qbbldl (mgr.14229) 382 : cluster [DBG] pgmap v199: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:39:55.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:55 vm06 bash[28114]: audit 2026-04-15T13:39:54.927097+0000 mgr.vm06.qbbldl (mgr.14229) 383 : audit [DBG] from='client.15306 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": 
["mon-mgr", ""]}]: dispatch 2026-04-15T13:39:55.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:55 vm06 bash[28114]: audit 2026-04-15T13:39:54.927097+0000 mgr.vm06.qbbldl (mgr.14229) 383 : audit [DBG] from='client.15306 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:39:55.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:55 vm06 bash[28114]: audit 2026-04-15T13:39:55.124061+0000 mgr.vm06.qbbldl (mgr.14229) 384 : audit [DBG] from='client.15310 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:39:55.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:55 vm06 bash[28114]: audit 2026-04-15T13:39:55.124061+0000 mgr.vm06.qbbldl (mgr.14229) 384 : audit [DBG] from='client.15310 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:39:55.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:55 vm06 bash[28114]: audit 2026-04-15T13:39:55.366297+0000 mon.vm06 (mon.0) 1016 : audit [DBG] from='client.? 192.168.123.106:0/2502971508' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:39:55.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:55 vm06 bash[28114]: audit 2026-04-15T13:39:55.366297+0000 mon.vm06 (mon.0) 1016 : audit [DBG] from='client.? 192.168.123.106:0/2502971508' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:39:55.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:55 vm09 bash[34466]: cluster 2026-04-15T13:39:54.202529+0000 mgr.vm06.qbbldl (mgr.14229) 382 : cluster [DBG] pgmap v199: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:39:55.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:55 vm09 bash[34466]: cluster 2026-04-15T13:39:54.202529+0000 mgr.vm06.qbbldl (mgr.14229) 382 : cluster [DBG] pgmap v199: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:39:55.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:55 vm09 bash[34466]: audit 2026-04-15T13:39:54.927097+0000 mgr.vm06.qbbldl (mgr.14229) 383 : audit [DBG] from='client.15306 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:39:55.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:55 vm09 bash[34466]: audit 2026-04-15T13:39:54.927097+0000 mgr.vm06.qbbldl (mgr.14229) 383 : audit [DBG] from='client.15306 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:39:55.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:55 vm09 bash[34466]: audit 2026-04-15T13:39:55.124061+0000 mgr.vm06.qbbldl (mgr.14229) 384 : audit [DBG] from='client.15310 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:39:55.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:55 vm09 bash[34466]: audit 2026-04-15T13:39:55.124061+0000 mgr.vm06.qbbldl (mgr.14229) 384 : audit [DBG] from='client.15310 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:39:55.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:55 vm09 bash[34466]: audit 2026-04-15T13:39:55.366297+0000 mon.vm06 (mon.0) 1016 : audit [DBG] 
from='client.? 192.168.123.106:0/2502971508' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:39:55.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:55 vm09 bash[34466]: audit 2026-04-15T13:39:55.366297+0000 mon.vm06 (mon.0) 1016 : audit [DBG] from='client.? 192.168.123.106:0/2502971508' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:39:56.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:56 vm06 bash[28114]: cluster 2026-04-15T13:39:56.203048+0000 mgr.vm06.qbbldl (mgr.14229) 385 : cluster [DBG] pgmap v200: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:39:56.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:56 vm06 bash[28114]: cluster 2026-04-15T13:39:56.203048+0000 mgr.vm06.qbbldl (mgr.14229) 385 : cluster [DBG] pgmap v200: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:39:56.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:56 vm09 bash[34466]: cluster 2026-04-15T13:39:56.203048+0000 mgr.vm06.qbbldl (mgr.14229) 385 : cluster [DBG] pgmap v200: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:39:56.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:56 vm09 bash[34466]: cluster 2026-04-15T13:39:56.203048+0000 mgr.vm06.qbbldl (mgr.14229) 385 : cluster [DBG] pgmap v200: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:39:59.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:59 vm06 bash[28114]: cluster 2026-04-15T13:39:58.203697+0000 mgr.vm06.qbbldl (mgr.14229) 386 : cluster [DBG] pgmap v201: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:39:59.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:39:59 vm06 bash[28114]: cluster 2026-04-15T13:39:58.203697+0000 mgr.vm06.qbbldl (mgr.14229) 386 : cluster [DBG] pgmap v201: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:39:59.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:59 vm09 bash[34466]: cluster 2026-04-15T13:39:58.203697+0000 mgr.vm06.qbbldl (mgr.14229) 386 : cluster [DBG] pgmap v201: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:39:59.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:39:59 vm09 bash[34466]: cluster 2026-04-15T13:39:58.203697+0000 mgr.vm06.qbbldl (mgr.14229) 386 : cluster [DBG] pgmap v201: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:40:00.581 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop 2026-04-15T13:40:00.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:00 vm09 bash[34466]: cluster 2026-04-15T13:40:00.000116+0000 mon.vm06 (mon.0) 1017 : cluster [WRN] Health detail: HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:40:00.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:00 vm09 bash[34466]: cluster 2026-04-15T13:40:00.000116+0000 mon.vm06 (mon.0) 1017 : cluster [WRN] Health detail: HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:40:00.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:00 
vm09 bash[34466]: cluster 2026-04-15T13:40:00.000140+0000 mon.vm06 (mon.0) 1018 : cluster [WRN] [WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:40:00.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:00 vm09 bash[34466]: cluster 2026-04-15T13:40:00.000140+0000 mon.vm06 (mon.0) 1018 : cluster [WRN] [WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:40:00.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:00 vm09 bash[34466]: cluster 2026-04-15T13:40:00.000147+0000 mon.vm06 (mon.0) 1019 : cluster [WRN] daemon rgw.foo.vm06.liyzhd on vm06 is in error state 2026-04-15T13:40:00.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:00 vm09 bash[34466]: cluster 2026-04-15T13:40:00.000147+0000 mon.vm06 (mon.0) 1019 : cluster [WRN] daemon rgw.foo.vm06.liyzhd on vm06 is in error state 2026-04-15T13:40:00.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:00 vm06 bash[28114]: cluster 2026-04-15T13:40:00.000116+0000 mon.vm06 (mon.0) 1017 : cluster [WRN] Health detail: HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:40:00.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:00 vm06 bash[28114]: cluster 2026-04-15T13:40:00.000116+0000 mon.vm06 (mon.0) 1017 : cluster [WRN] Health detail: HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:40:00.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:00 vm06 bash[28114]: cluster 2026-04-15T13:40:00.000140+0000 mon.vm06 (mon.0) 1018 : cluster [WRN] [WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:40:00.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:00 vm06 bash[28114]: cluster 2026-04-15T13:40:00.000140+0000 mon.vm06 (mon.0) 1018 : cluster [WRN] [WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:40:00.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:00 vm06 bash[28114]: cluster 2026-04-15T13:40:00.000147+0000 mon.vm06 (mon.0) 1019 : cluster [WRN] daemon rgw.foo.vm06.liyzhd on vm06 is in error state 2026-04-15T13:40:00.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:00 vm06 bash[28114]: cluster 2026-04-15T13:40:00.000147+0000 mon.vm06 (mon.0) 1019 : cluster [WRN] daemon rgw.foo.vm06.liyzhd on vm06 is in error state 2026-04-15T13:40:00.773 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:40:00.773 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (3m) 56s ago 4m 105M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:40:00.773 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 56s ago 4m - - 2026-04-15T13:40:00.773 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 57s ago 4m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3 2026-04-15T13:40:00.773 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (4m) 57s ago 4m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:40:01.018 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:40:01.018 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:40:01.018 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state 2026-04-15T13:40:01.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:01 vm09 bash[34466]: cluster 2026-04-15T13:40:00.204191+0000 mgr.vm06.qbbldl 
(mgr.14229) 387 : cluster [DBG] pgmap v202: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:40:01.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:01 vm09 bash[34466]: cluster 2026-04-15T13:40:00.204191+0000 mgr.vm06.qbbldl (mgr.14229) 387 : cluster [DBG] pgmap v202: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:40:01.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:01 vm09 bash[34466]: audit 2026-04-15T13:40:01.016458+0000 mon.vm06 (mon.0) 1020 : audit [DBG] from='client.? 192.168.123.106:0/3996672554' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:40:01.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:01 vm09 bash[34466]: audit 2026-04-15T13:40:01.016458+0000 mon.vm06 (mon.0) 1020 : audit [DBG] from='client.? 192.168.123.106:0/3996672554' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:40:01.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:01 vm06 bash[28114]: cluster 2026-04-15T13:40:00.204191+0000 mgr.vm06.qbbldl (mgr.14229) 387 : cluster [DBG] pgmap v202: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:40:01.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:01 vm06 bash[28114]: cluster 2026-04-15T13:40:00.204191+0000 mgr.vm06.qbbldl (mgr.14229) 387 : cluster [DBG] pgmap v202: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:40:01.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:01 vm06 bash[28114]: audit 2026-04-15T13:40:01.016458+0000 mon.vm06 (mon.0) 1020 : audit [DBG] from='client.? 192.168.123.106:0/3996672554' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:40:01.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:01 vm06 bash[28114]: audit 2026-04-15T13:40:01.016458+0000 mon.vm06 (mon.0) 1020 : audit [DBG] from='client.? 
192.168.123.106:0/3996672554' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:40:02.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:02 vm09 bash[34466]: audit 2026-04-15T13:40:00.562677+0000 mgr.vm06.qbbldl (mgr.14229) 388 : audit [DBG] from='client.15318 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:02.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:02 vm09 bash[34466]: audit 2026-04-15T13:40:00.562677+0000 mgr.vm06.qbbldl (mgr.14229) 388 : audit [DBG] from='client.15318 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:02.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:02 vm09 bash[34466]: audit 2026-04-15T13:40:00.767655+0000 mgr.vm06.qbbldl (mgr.14229) 389 : audit [DBG] from='client.15322 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:02.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:02 vm09 bash[34466]: audit 2026-04-15T13:40:00.767655+0000 mgr.vm06.qbbldl (mgr.14229) 389 : audit [DBG] from='client.15322 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:02.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:02 vm06 bash[28114]: audit 2026-04-15T13:40:00.562677+0000 mgr.vm06.qbbldl (mgr.14229) 388 : audit [DBG] from='client.15318 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:02.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:02 vm06 bash[28114]: audit 2026-04-15T13:40:00.562677+0000 mgr.vm06.qbbldl (mgr.14229) 388 : audit [DBG] from='client.15318 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:02.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:02 vm06 bash[28114]: audit 2026-04-15T13:40:00.767655+0000 mgr.vm06.qbbldl (mgr.14229) 389 : audit [DBG] from='client.15322 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:02.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:02 vm06 bash[28114]: audit 2026-04-15T13:40:00.767655+0000 mgr.vm06.qbbldl (mgr.14229) 389 : audit [DBG] from='client.15322 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:03.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:03 vm09 bash[34466]: cluster 2026-04-15T13:40:02.204625+0000 mgr.vm06.qbbldl (mgr.14229) 390 : cluster [DBG] pgmap v203: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:40:03.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:03 vm09 bash[34466]: cluster 2026-04-15T13:40:02.204625+0000 mgr.vm06.qbbldl (mgr.14229) 390 : cluster [DBG] pgmap v203: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:40:03.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:03 vm06 bash[28114]: cluster 2026-04-15T13:40:02.204625+0000 mgr.vm06.qbbldl (mgr.14229) 390 : cluster [DBG] pgmap v203: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:40:03.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:03 
vm06 bash[28114]: cluster 2026-04-15T13:40:02.204625+0000 mgr.vm06.qbbldl (mgr.14229) 390 : cluster [DBG] pgmap v203: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:40:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:04 vm09 bash[34466]: audit 2026-04-15T13:40:04.279087+0000 mon.vm06 (mon.0) 1021 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:40:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:04 vm09 bash[34466]: audit 2026-04-15T13:40:04.279087+0000 mon.vm06 (mon.0) 1021 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:40:04.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:04 vm06 bash[28114]: audit 2026-04-15T13:40:04.279087+0000 mon.vm06 (mon.0) 1021 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:40:04.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:04 vm06 bash[28114]: audit 2026-04-15T13:40:04.279087+0000 mon.vm06 (mon.0) 1021 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:40:05.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:05 vm09 bash[34466]: cluster 2026-04-15T13:40:04.205088+0000 mgr.vm06.qbbldl (mgr.14229) 391 : cluster [DBG] pgmap v204: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:40:05.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:05 vm09 bash[34466]: cluster 2026-04-15T13:40:04.205088+0000 mgr.vm06.qbbldl (mgr.14229) 391 : cluster [DBG] pgmap v204: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:40:05.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:05 vm06 bash[28114]: cluster 2026-04-15T13:40:04.205088+0000 mgr.vm06.qbbldl (mgr.14229) 391 : cluster [DBG] pgmap v204: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:40:05.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:05 vm06 bash[28114]: cluster 2026-04-15T13:40:04.205088+0000 mgr.vm06.qbbldl (mgr.14229) 391 : cluster [DBG] pgmap v204: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:40:06.238 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop 2026-04-15T13:40:06.433 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:40:06.433 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (3m) 62s ago 4m 105M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:40:06.433 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 62s ago 4m - - 2026-04-15T13:40:06.433 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 63s ago 4m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3 2026-04-15T13:40:06.433 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (4m) 63s ago 4m 111M - 
20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:40:06.668 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:40:06.668 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:40:06.668 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state 2026-04-15T13:40:07.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:07 vm09 bash[34466]: cluster 2026-04-15T13:40:06.205551+0000 mgr.vm06.qbbldl (mgr.14229) 392 : cluster [DBG] pgmap v205: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:40:07.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:07 vm09 bash[34466]: cluster 2026-04-15T13:40:06.205551+0000 mgr.vm06.qbbldl (mgr.14229) 392 : cluster [DBG] pgmap v205: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:40:07.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:07 vm09 bash[34466]: audit 2026-04-15T13:40:06.219901+0000 mgr.vm06.qbbldl (mgr.14229) 393 : audit [DBG] from='client.15330 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:07.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:07 vm09 bash[34466]: audit 2026-04-15T13:40:06.219901+0000 mgr.vm06.qbbldl (mgr.14229) 393 : audit [DBG] from='client.15330 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:07.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:07 vm09 bash[34466]: audit 2026-04-15T13:40:06.665321+0000 mon.vm06 (mon.0) 1022 : audit [DBG] from='client.? 192.168.123.106:0/286048172' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:40:07.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:07 vm09 bash[34466]: audit 2026-04-15T13:40:06.665321+0000 mon.vm06 (mon.0) 1022 : audit [DBG] from='client.? 192.168.123.106:0/286048172' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:40:07.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:07 vm06 bash[28114]: cluster 2026-04-15T13:40:06.205551+0000 mgr.vm06.qbbldl (mgr.14229) 392 : cluster [DBG] pgmap v205: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:40:07.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:07 vm06 bash[28114]: cluster 2026-04-15T13:40:06.205551+0000 mgr.vm06.qbbldl (mgr.14229) 392 : cluster [DBG] pgmap v205: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:40:07.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:07 vm06 bash[28114]: audit 2026-04-15T13:40:06.219901+0000 mgr.vm06.qbbldl (mgr.14229) 393 : audit [DBG] from='client.15330 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:07.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:07 vm06 bash[28114]: audit 2026-04-15T13:40:06.219901+0000 mgr.vm06.qbbldl (mgr.14229) 393 : audit [DBG] from='client.15330 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:07.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:07 vm06 bash[28114]: audit 2026-04-15T13:40:06.665321+0000 mon.vm06 (mon.0) 1022 : audit [DBG] from='client.? 
192.168.123.106:0/286048172' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:40:07.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:07 vm06 bash[28114]: audit 2026-04-15T13:40:06.665321+0000 mon.vm06 (mon.0) 1022 : audit [DBG] from='client.? 192.168.123.106:0/286048172' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:40:08.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:08 vm06 bash[28114]: audit 2026-04-15T13:40:06.428350+0000 mgr.vm06.qbbldl (mgr.14229) 394 : audit [DBG] from='client.15334 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:08.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:08 vm06 bash[28114]: audit 2026-04-15T13:40:06.428350+0000 mgr.vm06.qbbldl (mgr.14229) 394 : audit [DBG] from='client.15334 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:08.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:08 vm09 bash[34466]: audit 2026-04-15T13:40:06.428350+0000 mgr.vm06.qbbldl (mgr.14229) 394 : audit [DBG] from='client.15334 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:08.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:08 vm09 bash[34466]: audit 2026-04-15T13:40:06.428350+0000 mgr.vm06.qbbldl (mgr.14229) 394 : audit [DBG] from='client.15334 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:09.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:09 vm06 bash[28114]: cluster 2026-04-15T13:40:08.205976+0000 mgr.vm06.qbbldl (mgr.14229) 395 : cluster [DBG] pgmap v206: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:40:09.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:09 vm06 bash[28114]: cluster 2026-04-15T13:40:08.205976+0000 mgr.vm06.qbbldl (mgr.14229) 395 : cluster [DBG] pgmap v206: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:40:09.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:09 vm06 bash[28114]: audit 2026-04-15T13:40:08.485868+0000 mon.vm06 (mon.0) 1023 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:40:09.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:09 vm06 bash[28114]: audit 2026-04-15T13:40:08.485868+0000 mon.vm06 (mon.0) 1023 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:40:09.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:09 vm09 bash[34466]: cluster 2026-04-15T13:40:08.205976+0000 mgr.vm06.qbbldl (mgr.14229) 395 : cluster [DBG] pgmap v206: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:40:09.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:09 vm09 bash[34466]: cluster 2026-04-15T13:40:08.205976+0000 mgr.vm06.qbbldl (mgr.14229) 395 : cluster [DBG] pgmap v206: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:40:09.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:09 vm09 bash[34466]: audit 2026-04-15T13:40:08.485868+0000 mon.vm06 (mon.0) 
1023 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:40:09.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:09 vm09 bash[34466]: audit 2026-04-15T13:40:08.485868+0000 mon.vm06 (mon.0) 1023 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:40:11.015 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:10 vm06 bash[28114]: audit 2026-04-15T13:40:09.704917+0000 mon.vm06 (mon.0) 1024 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:40:11.015 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:10 vm06 bash[28114]: audit 2026-04-15T13:40:09.704917+0000 mon.vm06 (mon.0) 1024 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:40:11.015 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:10 vm06 bash[28114]: audit 2026-04-15T13:40:09.712039+0000 mon.vm06 (mon.0) 1025 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:40:11.015 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:10 vm06 bash[28114]: audit 2026-04-15T13:40:09.712039+0000 mon.vm06 (mon.0) 1025 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:40:11.015 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:10 vm06 bash[28114]: audit 2026-04-15T13:40:10.040993+0000 mon.vm06 (mon.0) 1026 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:40:11.015 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:10 vm06 bash[28114]: audit 2026-04-15T13:40:10.040993+0000 mon.vm06 (mon.0) 1026 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:40:11.015 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:10 vm06 bash[28114]: audit 2026-04-15T13:40:10.041672+0000 mon.vm06 (mon.0) 1027 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:40:11.015 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:10 vm06 bash[28114]: audit 2026-04-15T13:40:10.041672+0000 mon.vm06 (mon.0) 1027 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:40:11.015 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:10 vm06 bash[28114]: cluster 2026-04-15T13:40:10.042907+0000 mgr.vm06.qbbldl (mgr.14229) 396 : cluster [DBG] pgmap v207: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 172 B/s rd, 345 B/s wr, 0 op/s 2026-04-15T13:40:11.015 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:10 vm06 bash[28114]: cluster 2026-04-15T13:40:10.042907+0000 mgr.vm06.qbbldl (mgr.14229) 396 : cluster [DBG] pgmap v207: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 172 B/s rd, 345 B/s wr, 0 op/s 2026-04-15T13:40:11.015 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:10 vm06 bash[28114]: audit 2026-04-15T13:40:10.207373+0000 mon.vm06 (mon.0) 1028 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 
2026-04-15T13:40:11.015 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:10 vm06 bash[28114]: audit 2026-04-15T13:40:10.207373+0000 mon.vm06 (mon.0) 1028 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:40:11.015 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:10 vm06 bash[28114]: audit 2026-04-15T13:40:10.209593+0000 mon.vm06 (mon.0) 1029 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:40:11.015 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:10 vm06 bash[28114]: audit 2026-04-15T13:40:10.209593+0000 mon.vm06 (mon.0) 1029 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:40:11.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:10 vm09 bash[34466]: audit 2026-04-15T13:40:09.704917+0000 mon.vm06 (mon.0) 1024 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:40:11.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:10 vm09 bash[34466]: audit 2026-04-15T13:40:09.704917+0000 mon.vm06 (mon.0) 1024 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:40:11.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:10 vm09 bash[34466]: audit 2026-04-15T13:40:09.712039+0000 mon.vm06 (mon.0) 1025 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:40:11.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:10 vm09 bash[34466]: audit 2026-04-15T13:40:09.712039+0000 mon.vm06 (mon.0) 1025 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:40:11.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:10 vm09 bash[34466]: audit 2026-04-15T13:40:10.040993+0000 mon.vm06 (mon.0) 1026 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:40:11.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:10 vm09 bash[34466]: audit 2026-04-15T13:40:10.040993+0000 mon.vm06 (mon.0) 1026 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:40:11.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:10 vm09 bash[34466]: audit 2026-04-15T13:40:10.041672+0000 mon.vm06 (mon.0) 1027 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:40:11.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:10 vm09 bash[34466]: audit 2026-04-15T13:40:10.041672+0000 mon.vm06 (mon.0) 1027 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:40:11.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:10 vm09 bash[34466]: cluster 2026-04-15T13:40:10.042907+0000 mgr.vm06.qbbldl (mgr.14229) 396 : cluster [DBG] pgmap v207: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 172 B/s rd, 345 B/s wr, 0 op/s 2026-04-15T13:40:11.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:10 vm09 bash[34466]: cluster 2026-04-15T13:40:10.042907+0000 
mgr.vm06.qbbldl (mgr.14229) 396 : cluster [DBG] pgmap v207: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 172 B/s rd, 345 B/s wr, 0 op/s
2026-04-15T13:40:11.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:10 vm09 bash[34466]: audit 2026-04-15T13:40:10.207373+0000 mon.vm06 (mon.0) 1028 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:40:11.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:10 vm09 bash[34466]: audit 2026-04-15T13:40:10.209593+0000 mon.vm06 (mon.0) 1029 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:40:11.887 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:40:12.083 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:40:12.083 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (3m) 2s ago 4m 110M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:40:12.084 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 2s ago 4m - -
2026-04-15T13:40:12.084 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 68s ago 4m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:40:12.084 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (4m) 68s ago 4m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:40:12.327 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:40:12.327 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:40:12.327 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:40:13.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:13 vm06 bash[28114]: audit 2026-04-15T13:40:11.865076+0000 mgr.vm06.qbbldl (mgr.14229) 397 : audit [DBG] from='client.15342 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:13.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:13 vm06 bash[28114]: cluster 2026-04-15T13:40:12.043278+0000 mgr.vm06.qbbldl (mgr.14229) 398 : cluster [DBG] pgmap v208: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 172 B/s rd, 345 B/s wr, 0 op/s
2026-04-15T13:40:13.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:13 vm06 bash[28114]: audit 2026-04-15T13:40:12.078820+0000 mgr.vm06.qbbldl (mgr.14229) 399 : audit [DBG] from='client.15346 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:13.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:13 vm06 bash[28114]: audit 2026-04-15T13:40:12.324617+0000 mon.vm06 (mon.0) 1030 : audit [DBG] from='client.? 192.168.123.106:0/3876803638' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:40:13.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:13 vm09 bash[34466]: audit 2026-04-15T13:40:11.865076+0000 mgr.vm06.qbbldl (mgr.14229) 397 : audit [DBG] from='client.15342 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:13.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:13 vm09 bash[34466]: cluster 2026-04-15T13:40:12.043278+0000 mgr.vm06.qbbldl (mgr.14229) 398 : cluster [DBG] pgmap v208: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 172 B/s rd, 345 B/s wr, 0 op/s
2026-04-15T13:40:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:13 vm09 bash[34466]: audit 2026-04-15T13:40:12.078820+0000 mgr.vm06.qbbldl (mgr.14229) 399 : audit [DBG] from='client.15346 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:13 vm09 bash[34466]: audit 2026-04-15T13:40:12.324617+0000 mon.vm06 (mon.0) 1030 : audit [DBG] from='client.? 192.168.123.106:0/3876803638' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:40:15.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:15 vm06 bash[28114]: cluster 2026-04-15T13:40:14.043894+0000 mgr.vm06.qbbldl (mgr.14229) 400 : cluster [DBG] pgmap v209: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 172 B/s rd, 345 B/s wr, 0 op/s
2026-04-15T13:40:15.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:15 vm09 bash[34466]: cluster 2026-04-15T13:40:14.043894+0000 mgr.vm06.qbbldl (mgr.14229) 400 : cluster [DBG] pgmap v209: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 172 B/s rd, 345 B/s wr, 0 op/s
2026-04-15T13:40:17.543 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:40:17.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:17 vm09 bash[34466]: cluster 2026-04-15T13:40:16.044464+0000 mgr.vm06.qbbldl (mgr.14229) 401 : cluster [DBG] pgmap v210: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 172 B/s rd, 345 B/s wr, 0 op/s
2026-04-15T13:40:17.733 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:40:17.738 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (3m) 8s ago 4m 110M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:40:17.738 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 8s ago 4m - -
2026-04-15T13:40:17.738 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 74s ago 4m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:40:17.738 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (4m) 74s ago 4m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:40:17.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:17 vm06 bash[28114]: cluster 2026-04-15T13:40:16.044464+0000 mgr.vm06.qbbldl (mgr.14229) 401 : cluster [DBG] pgmap v210: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 172 B/s rd, 345 B/s wr, 0 op/s
2026-04-15T13:40:17.992 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:40:17.992 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:40:17.992 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:40:18.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:18 vm09 bash[34466]: audit 2026-04-15T13:40:17.990162+0000 mon.vm06 (mon.0) 1031 : audit [DBG] from='client.? 192.168.123.106:0/2345916986' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:40:18.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:18 vm06 bash[28114]: audit 2026-04-15T13:40:17.990162+0000 mon.vm06 (mon.0) 1031 : audit [DBG] from='client.? 192.168.123.106:0/2345916986' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:40:19.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:19 vm09 bash[34466]: audit 2026-04-15T13:40:17.523560+0000 mgr.vm06.qbbldl (mgr.14229) 402 : audit [DBG] from='client.15354 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:19.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:19 vm09 bash[34466]: audit 2026-04-15T13:40:17.728866+0000 mgr.vm06.qbbldl (mgr.14229) 403 : audit [DBG] from='client.15358 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:19.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:19 vm09 bash[34466]: cluster 2026-04-15T13:40:18.045073+0000 mgr.vm06.qbbldl (mgr.14229) 404 : cluster [DBG] pgmap v211: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 172 B/s rd, 345 B/s wr, 0 op/s
2026-04-15T13:40:19.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:19 vm06 bash[28114]: audit 2026-04-15T13:40:17.523560+0000 mgr.vm06.qbbldl (mgr.14229) 402 : audit [DBG] from='client.15354 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:19.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:19 vm06 bash[28114]: audit 2026-04-15T13:40:17.728866+0000 mgr.vm06.qbbldl (mgr.14229) 403 : audit [DBG] from='client.15358 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:19.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:19 vm06 bash[28114]: cluster 2026-04-15T13:40:18.045073+0000 mgr.vm06.qbbldl (mgr.14229) 404 : cluster [DBG] pgmap v211: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 172 B/s rd, 345 B/s wr, 0 op/s
2026-04-15T13:40:21.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:21 vm09 bash[34466]: cluster 2026-04-15T13:40:20.045538+0000 mgr.vm06.qbbldl (mgr.14229) 405 : cluster [DBG] pgmap v212: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 172 B/s rd, 345 B/s wr, 0 op/s
2026-04-15T13:40:21.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:21 vm06 bash[28114]: cluster 2026-04-15T13:40:20.045538+0000 mgr.vm06.qbbldl (mgr.14229) 405 : cluster [DBG] pgmap v212: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 172 B/s rd, 345 B/s wr, 0 op/s
2026-04-15T13:40:23.202 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:40:23.392 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:40:23.392 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (4m) 13s ago 4m 110M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:40:23.392 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 13s ago 4m - -
2026-04-15T13:40:23.392 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 80s ago 4m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:40:23.392 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (4m) 80s ago 4m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:40:23.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:23 vm09 bash[34466]: cluster 2026-04-15T13:40:22.045871+0000 mgr.vm06.qbbldl (mgr.14229) 406 : cluster [DBG] pgmap v213: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:40:23.636 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:40:23.636 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:40:23.636 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:40:23.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:23 vm06 bash[28114]: cluster 2026-04-15T13:40:22.045871+0000 mgr.vm06.qbbldl (mgr.14229) 406 : cluster [DBG] pgmap v213: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:40:24.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:24 vm09 bash[34466]: audit 2026-04-15T13:40:23.183394+0000 mgr.vm06.qbbldl (mgr.14229) 407 : audit [DBG] from='client.24889 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:24.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:24 vm09 bash[34466]: audit 2026-04-15T13:40:23.387208+0000 mgr.vm06.qbbldl (mgr.14229) 408 : audit [DBG] from='client.15370 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:24.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:24 vm09 bash[34466]: audit 2026-04-15T13:40:23.486053+0000 mon.vm06 (mon.0) 1032 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:40:24.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:24 vm09 bash[34466]: audit 2026-04-15T13:40:23.634570+0000 mon.vm06 (mon.0) 1033 : audit [DBG] from='client.? 192.168.123.106:0/1467999426' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:40:24.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:24 vm06 bash[28114]: audit 2026-04-15T13:40:23.183394+0000 mgr.vm06.qbbldl (mgr.14229) 407 : audit [DBG] from='client.24889 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:24.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:24 vm06 bash[28114]: audit 2026-04-15T13:40:23.387208+0000 mgr.vm06.qbbldl (mgr.14229) 408 : audit [DBG] from='client.15370 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:24.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:24 vm06 bash[28114]: audit 2026-04-15T13:40:23.486053+0000 mon.vm06 (mon.0) 1032 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:40:24.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:24 vm06 bash[28114]: audit 2026-04-15T13:40:23.634570+0000 mon.vm06 (mon.0) 1033 : audit [DBG] from='client.? 192.168.123.106:0/1467999426' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:40:25.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:25 vm09 bash[34466]: cluster 2026-04-15T13:40:24.046281+0000 mgr.vm06.qbbldl (mgr.14229) 409 : cluster [DBG] pgmap v214: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:40:25.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:25 vm06 bash[28114]: cluster 2026-04-15T13:40:24.046281+0000 mgr.vm06.qbbldl (mgr.14229) 409 : cluster [DBG] pgmap v214: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:40:27.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:27 vm06 bash[28114]: cluster 2026-04-15T13:40:26.046729+0000 mgr.vm06.qbbldl (mgr.14229) 410 : cluster [DBG] pgmap v215: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:40:27.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:27 vm09 bash[34466]: cluster 2026-04-15T13:40:26.046729+0000 mgr.vm06.qbbldl (mgr.14229) 410 : cluster [DBG] pgmap v215: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:40:28.854 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:40:29.037 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:40:29.037 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (4m) 19s ago 4m 110M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:40:29.037 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 19s ago 5m - -
2026-04-15T13:40:29.037 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (5m) 85s ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:40:29.037 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (5m) 85s ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:40:29.282 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:40:29.282 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:40:29.282 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:40:29.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:29 vm06 bash[28114]: cluster 2026-04-15T13:40:28.047211+0000 mgr.vm06.qbbldl (mgr.14229) 411 : cluster [DBG] pgmap v216: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:40:29.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:29 vm06 bash[28114]: audit 2026-04-15T13:40:29.281857+0000 mon.vm09 (mon.1) 34 : audit [DBG] from='client.? 192.168.123.106:0/2295293824' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:40:29.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:29 vm09 bash[34466]: cluster 2026-04-15T13:40:28.047211+0000 mgr.vm06.qbbldl (mgr.14229) 411 : cluster [DBG] pgmap v216: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:40:29.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:29 vm09 bash[34466]: audit 2026-04-15T13:40:29.281857+0000 mon.vm09 (mon.1) 34 : audit [DBG] from='client.? 192.168.123.106:0/2295293824' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:40:30.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:30 vm06 bash[28114]: audit 2026-04-15T13:40:28.834730+0000 mgr.vm06.qbbldl (mgr.14229) 412 : audit [DBG] from='client.15376 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:30.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:30 vm06 bash[28114]: audit 2026-04-15T13:40:29.032603+0000 mgr.vm06.qbbldl (mgr.14229) 413 : audit [DBG] from='client.15380 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:30.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:30 vm06 bash[28114]: cluster 2026-04-15T13:40:30.047840+0000 mgr.vm06.qbbldl (mgr.14229) 414 : cluster [DBG] pgmap v217: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:40:30.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:30 vm09 bash[34466]: audit 2026-04-15T13:40:28.834730+0000 mgr.vm06.qbbldl (mgr.14229) 412 : audit [DBG] from='client.15376 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:30.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:30 vm09 bash[34466]: audit 2026-04-15T13:40:29.032603+0000 mgr.vm06.qbbldl (mgr.14229) 413 : audit [DBG] from='client.15380 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:30.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:30 vm09 bash[34466]: cluster 2026-04-15T13:40:30.047840+0000 mgr.vm06.qbbldl (mgr.14229) 414 : cluster [DBG] pgmap v217: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:40:33.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:33 vm09 bash[34466]: cluster 2026-04-15T13:40:32.048185+0000 mgr.vm06.qbbldl (mgr.14229) 415 : cluster [DBG] pgmap v218: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:40:33.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:33 vm06 bash[28114]: cluster 2026-04-15T13:40:32.048185+0000 mgr.vm06.qbbldl (mgr.14229) 415 : cluster [DBG] pgmap v218: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:40:34.501 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:40:34.705 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:40:34.705 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (4m) 25s ago 5m 110M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:40:34.705 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 25s ago 5m - -
2026-04-15T13:40:34.705 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (5m) 91s ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:40:34.705 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (5m) 91s ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:40:34.943 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:40:34.943 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:40:34.943 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:40:35.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:35 vm06 bash[28114]: cluster 2026-04-15T13:40:34.048630+0000 mgr.vm06.qbbldl (mgr.14229) 416 : cluster [DBG] pgmap v219: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:40:35.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:35 vm06 bash[28114]: audit 2026-04-15T13:40:34.940843+0000 mon.vm06 (mon.0) 1034 : audit [DBG] from='client.? 192.168.123.106:0/2658710286' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:40:35.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:35 vm09 bash[34466]: cluster 2026-04-15T13:40:34.048630+0000 mgr.vm06.qbbldl (mgr.14229) 416 : cluster [DBG] pgmap v219: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:40:35.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:35 vm09 bash[34466]: audit 2026-04-15T13:40:34.940843+0000 mon.vm06 (mon.0) 1034 : audit [DBG] from='client.? 192.168.123.106:0/2658710286' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:40:36.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:36 vm06 bash[28114]: audit 2026-04-15T13:40:34.482176+0000 mgr.vm06.qbbldl (mgr.14229) 417 : audit [DBG] from='client.15388 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:36.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:36 vm06 bash[28114]: audit 2026-04-15T13:40:34.700354+0000 mgr.vm06.qbbldl (mgr.14229) 418 : audit [DBG] from='client.15392 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:36.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:36 vm09 bash[34466]: audit 2026-04-15T13:40:34.482176+0000 mgr.vm06.qbbldl (mgr.14229) 417 : audit [DBG] from='client.15388 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:36.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:36 vm09 bash[34466]: audit 2026-04-15T13:40:34.700354+0000 mgr.vm06.qbbldl (mgr.14229) 418 : audit [DBG] from='client.15392 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:37.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:37 vm06 bash[28114]: cluster 2026-04-15T13:40:36.049173+0000 mgr.vm06.qbbldl (mgr.14229) 419 : cluster [DBG] pgmap v220: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:40:37.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:37 vm09 bash[34466]: cluster 2026-04-15T13:40:36.049173+0000 mgr.vm06.qbbldl (mgr.14229) 419 : cluster [DBG] pgmap v220: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:40:39.265 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:39 vm06 bash[28114]: cluster 2026-04-15T13:40:38.049546+0000 mgr.vm06.qbbldl (mgr.14229) 420 : cluster [DBG] pgmap v221: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:40:39.265 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:39 vm06 bash[28114]: audit 2026-04-15T13:40:38.486198+0000 mon.vm06 (mon.0) 1035 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:40:39.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:39 vm09 bash[34466]: cluster 2026-04-15T13:40:38.049546+0000 mgr.vm06.qbbldl (mgr.14229) 420 : cluster [DBG] pgmap v221: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:40:39.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:39 vm09 bash[34466]: audit 2026-04-15T13:40:38.486198+0000 mon.vm06 (mon.0) 1035 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:40:40.163 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:40:40.353 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:40:40.353 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (4m) 30s ago 5m 110M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:40:40.353 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 30s ago 5m - -
2026-04-15T13:40:40.353 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (5m) 97s ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:40:40.353 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (5m) 97s ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:40:40.589 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:40:40.589 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:40:40.589 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:40:41.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:41 vm06 bash[28114]: cluster 2026-04-15T13:40:40.049982+0000 mgr.vm06.qbbldl (mgr.14229) 421 : cluster [DBG] pgmap v222: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:40:41.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:41 vm06 bash[28114]: audit 2026-04-15T13:40:40.143492+0000 mgr.vm06.qbbldl (mgr.14229) 422 : audit [DBG] from='client.15400 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:41.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:41 vm06 bash[28114]: audit 2026-04-15T13:40:40.347935+0000 mgr.vm06.qbbldl (mgr.14229) 423 : audit [DBG] from='client.15404 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:41.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:41 vm06 bash[28114]: audit 2026-04-15T13:40:40.587254+0000 mon.vm06 (mon.0) 1036 : audit [DBG] from='client.? 192.168.123.106:0/2754103224' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:40:41.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:41 vm09 bash[34466]: cluster 2026-04-15T13:40:40.049982+0000 mgr.vm06.qbbldl (mgr.14229) 421 : cluster [DBG] pgmap v222: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:40:41.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:41 vm09 bash[34466]: audit 2026-04-15T13:40:40.143492+0000 mgr.vm06.qbbldl (mgr.14229) 422 : audit [DBG] from='client.15400 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:41.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:41 vm09 bash[34466]: audit 2026-04-15T13:40:40.347935+0000 mgr.vm06.qbbldl (mgr.14229) 423 : audit [DBG] from='client.15404 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:41.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:41 vm09 bash[34466]: audit 2026-04-15T13:40:40.587254+0000 mon.vm06 (mon.0) 1036 : audit [DBG] from='client.? 192.168.123.106:0/2754103224' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:40:43.265 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:43 vm06 bash[28114]: cluster 2026-04-15T13:40:42.050302+0000 mgr.vm06.qbbldl (mgr.14229) 424 : cluster [DBG] pgmap v223: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:40:43.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:43 vm09 bash[34466]: cluster 2026-04-15T13:40:42.050302+0000 mgr.vm06.qbbldl (mgr.14229) 424 : cluster [DBG] pgmap v223: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:40:45.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:45 vm06 bash[28114]: cluster 2026-04-15T13:40:44.050657+0000 mgr.vm06.qbbldl (mgr.14229) 425 : cluster [DBG] pgmap v224: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:40:45.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:45 vm09 bash[34466]: cluster 2026-04-15T13:40:44.050657+0000 mgr.vm06.qbbldl (mgr.14229) 425 : cluster [DBG] pgmap v224: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:40:45.807 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:40:45.994 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:40:45.994 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (4m) 36s ago 5m 110M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:40:45.994 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 36s ago 5m - -
2026-04-15T13:40:45.994 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (5m) 102s ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:40:45.994 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (5m) 102s ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:40:46.233 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:40:46.233 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:40:46.233 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:40:47.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:47 vm06 bash[28114]: audit 2026-04-15T13:40:45.787372+0000 mgr.vm06.qbbldl (mgr.14229) 426 : audit [DBG] from='client.15412 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:47.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:47 vm06 bash[28114]: audit 2026-04-15T13:40:45.988473+0000 mgr.vm06.qbbldl (mgr.14229) 427 : audit [DBG] from='client.15416 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:47.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:47 vm06 bash[28114]: cluster 2026-04-15T13:40:46.051079+0000 mgr.vm06.qbbldl (mgr.14229) 428 : cluster [DBG] pgmap v225: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:40:47.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:47 vm06 bash[28114]: audit 2026-04-15T13:40:46.230799+0000 mon.vm06 (mon.0) 1037 : audit [DBG] from='client.? 192.168.123.106:0/831564167' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:40:47.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:47 vm09 bash[34466]: audit 2026-04-15T13:40:45.787372+0000 mgr.vm06.qbbldl (mgr.14229) 426 : audit [DBG] from='client.15412 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:47.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:47 vm09 bash[34466]: audit 2026-04-15T13:40:45.988473+0000 mgr.vm06.qbbldl (mgr.14229) 427 : audit [DBG] from='client.15416 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:47.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:47 vm09 bash[34466]: cluster 2026-04-15T13:40:46.051079+0000 mgr.vm06.qbbldl (mgr.14229) 428 : cluster [DBG] pgmap v225: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:40:47.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:47 vm09 bash[34466]: audit 2026-04-15T13:40:46.230799+0000 mon.vm06 (mon.0) 1037 : audit [DBG] from='client.? 192.168.123.106:0/831564167' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:40:49.265 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:49 vm06 bash[28114]: cluster 2026-04-15T13:40:48.051446+0000 mgr.vm06.qbbldl (mgr.14229) 429 : cluster [DBG] pgmap v226: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:40:49.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:49 vm09 bash[34466]: cluster 2026-04-15T13:40:48.051446+0000 mgr.vm06.qbbldl (mgr.14229) 429 : cluster [DBG] pgmap v226: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:40:51.463 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:40:51.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:51 vm06 bash[28114]: cluster 2026-04-15T13:40:50.051860+0000 mgr.vm06.qbbldl (mgr.14229) 430 : cluster [DBG] pgmap v227: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:40:51.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:51 vm09 bash[34466]: cluster 2026-04-15T13:40:50.051860+0000 mgr.vm06.qbbldl (mgr.14229) 430 : cluster [DBG] pgmap v227: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:40:51.659 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:40:51.659 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (4m) 42s ago 5m 110M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:40:51.659 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 42s ago 5m - -
2026-04-15T13:40:51.659 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (5m) 108s ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:40:51.659 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (5m) 108s ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:40:51.907 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:40:51.907 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:40:51.907 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:40:52.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:52 vm06 bash[28114]: audit 2026-04-15T13:40:51.905359+0000 mon.vm06 (mon.0) 1038 : audit [DBG] from='client.? 192.168.123.106:0/1739326074' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:40:52.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:52 vm09 bash[34466]: audit 2026-04-15T13:40:51.905359+0000 mon.vm06 (mon.0) 1038 : audit [DBG] from='client.? 192.168.123.106:0/1739326074' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:40:53.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:53 vm06 bash[28114]: audit 2026-04-15T13:40:51.438834+0000 mgr.vm06.qbbldl (mgr.14229) 431 : audit [DBG] from='client.15424 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:53.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:53 vm06 bash[28114]: audit 2026-04-15T13:40:51.654315+0000 mgr.vm06.qbbldl (mgr.14229) 432 : audit [DBG] from='client.15428 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:53.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:53 vm06 bash[28114]: cluster 2026-04-15T13:40:52.052205+0000 mgr.vm06.qbbldl (mgr.14229) 433 : cluster [DBG] pgmap v228: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:40:53.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:53 vm09 bash[34466]: audit 2026-04-15T13:40:51.438834+0000 mgr.vm06.qbbldl (mgr.14229) 431 : audit [DBG] from='client.15424 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:53.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:53 vm09 bash[34466]: audit 2026-04-15T13:40:51.654315+0000 mgr.vm06.qbbldl (mgr.14229) 432 : audit [DBG] from='client.15428 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:53.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:53 vm09 bash[34466]: cluster 2026-04-15T13:40:52.052205+0000 mgr.vm06.qbbldl (mgr.14229) 433 : cluster [DBG] pgmap v228: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:40:54.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:54 vm06 bash[28114]: audit 2026-04-15T13:40:53.486412+0000 mon.vm06 (mon.0) 1039 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:40:54.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:54 vm09 bash[34466]: audit 2026-04-15T13:40:53.486412+0000 mon.vm06 (mon.0) 1039 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:40:55.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:55 vm06 bash[28114]: cluster 2026-04-15T13:40:54.052576+0000 mgr.vm06.qbbldl (mgr.14229) 434 : cluster [DBG] pgmap v229: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:40:55.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:55 vm09 bash[34466]: cluster 2026-04-15T13:40:54.052576+0000 mgr.vm06.qbbldl (mgr.14229) 434 : cluster [DBG] pgmap v229: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:40:57.103 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:40:57.278 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:40:57.278 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (4m) 47s ago 5m 110M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:40:57.278 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 47s ago 5m - -
2026-04-15T13:40:57.278 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (5m) 114s ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:40:57.278 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (5m) 114s ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:40:57.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:57 vm06 bash[28114]: cluster 2026-04-15T13:40:56.053037+0000 mgr.vm06.qbbldl (mgr.14229) 435 : cluster [DBG] pgmap v230: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:40:57.522 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:40:57.522 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:40:57.522 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:40:57.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:57 vm09 bash[34466]: cluster 2026-04-15T13:40:56.053037+0000 mgr.vm06.qbbldl (mgr.14229) 435 : cluster [DBG] pgmap v230: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:40:58.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:58 vm06 bash[28114]: audit 2026-04-15T13:40:57.085420+0000 mgr.vm06.qbbldl (mgr.14229) 436 : audit [DBG] from='client.15436 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:40:58.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:58 vm06 bash[28114]: audit 2026-04-15T13:40:57.273722+0000 mgr.vm06.qbbldl
(mgr.14229) 437 : audit [DBG] from='client.15440 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:58.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:58 vm06 bash[28114]: audit 2026-04-15T13:40:57.273722+0000 mgr.vm06.qbbldl (mgr.14229) 437 : audit [DBG] from='client.15440 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:58.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:58 vm06 bash[28114]: audit 2026-04-15T13:40:57.519789+0000 mon.vm06 (mon.0) 1040 : audit [DBG] from='client.? 192.168.123.106:0/163167768' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:40:58.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:58 vm06 bash[28114]: audit 2026-04-15T13:40:57.519789+0000 mon.vm06 (mon.0) 1040 : audit [DBG] from='client.? 192.168.123.106:0/163167768' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:40:58.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:58 vm09 bash[34466]: audit 2026-04-15T13:40:57.085420+0000 mgr.vm06.qbbldl (mgr.14229) 436 : audit [DBG] from='client.15436 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:58.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:58 vm09 bash[34466]: audit 2026-04-15T13:40:57.085420+0000 mgr.vm06.qbbldl (mgr.14229) 436 : audit [DBG] from='client.15436 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:58.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:58 vm09 bash[34466]: audit 2026-04-15T13:40:57.273722+0000 mgr.vm06.qbbldl (mgr.14229) 437 : audit [DBG] from='client.15440 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:58.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:58 vm09 bash[34466]: audit 2026-04-15T13:40:57.273722+0000 mgr.vm06.qbbldl (mgr.14229) 437 : audit [DBG] from='client.15440 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:40:58.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:58 vm09 bash[34466]: audit 2026-04-15T13:40:57.519789+0000 mon.vm06 (mon.0) 1040 : audit [DBG] from='client.? 192.168.123.106:0/163167768' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:40:58.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:58 vm09 bash[34466]: audit 2026-04-15T13:40:57.519789+0000 mon.vm06 (mon.0) 1040 : audit [DBG] from='client.? 
192.168.123.106:0/163167768' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:40:59.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:59 vm06 bash[28114]: cluster 2026-04-15T13:40:58.053432+0000 mgr.vm06.qbbldl (mgr.14229) 438 : cluster [DBG] pgmap v231: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:40:59.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:40:59 vm06 bash[28114]: cluster 2026-04-15T13:40:58.053432+0000 mgr.vm06.qbbldl (mgr.14229) 438 : cluster [DBG] pgmap v231: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:40:59.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:59 vm09 bash[34466]: cluster 2026-04-15T13:40:58.053432+0000 mgr.vm06.qbbldl (mgr.14229) 438 : cluster [DBG] pgmap v231: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:40:59.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:40:59 vm09 bash[34466]: cluster 2026-04-15T13:40:58.053432+0000 mgr.vm06.qbbldl (mgr.14229) 438 : cluster [DBG] pgmap v231: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:41:01.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:01 vm06 bash[28114]: cluster 2026-04-15T13:41:00.053796+0000 mgr.vm06.qbbldl (mgr.14229) 439 : cluster [DBG] pgmap v232: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:41:01.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:01 vm06 bash[28114]: cluster 2026-04-15T13:41:00.053796+0000 mgr.vm06.qbbldl (mgr.14229) 439 : cluster [DBG] pgmap v232: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:41:01.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:01 vm09 bash[34466]: cluster 2026-04-15T13:41:00.053796+0000 mgr.vm06.qbbldl (mgr.14229) 439 : cluster [DBG] pgmap v232: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:41:01.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:01 vm09 bash[34466]: cluster 2026-04-15T13:41:00.053796+0000 mgr.vm06.qbbldl (mgr.14229) 439 : cluster [DBG] pgmap v232: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:41:02.726 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop 2026-04-15T13:41:02.910 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:41:02.910 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (4m) 53s ago 5m 110M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:41:02.910 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 53s ago 5m - - 2026-04-15T13:41:02.910 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (5m) 119s ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3 2026-04-15T13:41:02.910 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (5m) 119s ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:41:03.154 
INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:41:03.154 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:41:03.154 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state 2026-04-15T13:41:03.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:03 vm06 bash[28114]: cluster 2026-04-15T13:41:02.054081+0000 mgr.vm06.qbbldl (mgr.14229) 440 : cluster [DBG] pgmap v233: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:41:03.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:03 vm06 bash[28114]: cluster 2026-04-15T13:41:02.054081+0000 mgr.vm06.qbbldl (mgr.14229) 440 : cluster [DBG] pgmap v233: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:41:03.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:03 vm06 bash[28114]: audit 2026-04-15T13:41:03.152047+0000 mon.vm06 (mon.0) 1041 : audit [DBG] from='client.? 192.168.123.106:0/1628452292' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:03.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:03 vm06 bash[28114]: audit 2026-04-15T13:41:03.152047+0000 mon.vm06 (mon.0) 1041 : audit [DBG] from='client.? 192.168.123.106:0/1628452292' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:03.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:03 vm09 bash[34466]: cluster 2026-04-15T13:41:02.054081+0000 mgr.vm06.qbbldl (mgr.14229) 440 : cluster [DBG] pgmap v233: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:41:03.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:03 vm09 bash[34466]: cluster 2026-04-15T13:41:02.054081+0000 mgr.vm06.qbbldl (mgr.14229) 440 : cluster [DBG] pgmap v233: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:41:03.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:03 vm09 bash[34466]: audit 2026-04-15T13:41:03.152047+0000 mon.vm06 (mon.0) 1041 : audit [DBG] from='client.? 192.168.123.106:0/1628452292' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:03.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:03 vm09 bash[34466]: audit 2026-04-15T13:41:03.152047+0000 mon.vm06 (mon.0) 1041 : audit [DBG] from='client.? 
192.168.123.106:0/1628452292' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:04.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:04 vm06 bash[28114]: audit 2026-04-15T13:41:02.709000+0000 mgr.vm06.qbbldl (mgr.14229) 441 : audit [DBG] from='client.15448 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:04.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:04 vm06 bash[28114]: audit 2026-04-15T13:41:02.709000+0000 mgr.vm06.qbbldl (mgr.14229) 441 : audit [DBG] from='client.15448 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:04.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:04 vm06 bash[28114]: audit 2026-04-15T13:41:02.904863+0000 mgr.vm06.qbbldl (mgr.14229) 442 : audit [DBG] from='client.15452 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:04.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:04 vm06 bash[28114]: audit 2026-04-15T13:41:02.904863+0000 mgr.vm06.qbbldl (mgr.14229) 442 : audit [DBG] from='client.15452 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:04 vm09 bash[34466]: audit 2026-04-15T13:41:02.709000+0000 mgr.vm06.qbbldl (mgr.14229) 441 : audit [DBG] from='client.15448 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:04 vm09 bash[34466]: audit 2026-04-15T13:41:02.709000+0000 mgr.vm06.qbbldl (mgr.14229) 441 : audit [DBG] from='client.15448 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:04 vm09 bash[34466]: audit 2026-04-15T13:41:02.904863+0000 mgr.vm06.qbbldl (mgr.14229) 442 : audit [DBG] from='client.15452 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:04 vm09 bash[34466]: audit 2026-04-15T13:41:02.904863+0000 mgr.vm06.qbbldl (mgr.14229) 442 : audit [DBG] from='client.15452 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:05.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:05 vm06 bash[28114]: cluster 2026-04-15T13:41:04.054430+0000 mgr.vm06.qbbldl (mgr.14229) 443 : cluster [DBG] pgmap v234: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:41:05.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:05 vm06 bash[28114]: cluster 2026-04-15T13:41:04.054430+0000 mgr.vm06.qbbldl (mgr.14229) 443 : cluster [DBG] pgmap v234: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:41:05.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:05 vm09 bash[34466]: cluster 2026-04-15T13:41:04.054430+0000 mgr.vm06.qbbldl (mgr.14229) 443 : cluster [DBG] pgmap v234: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:41:05.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:05 
vm09 bash[34466]: cluster 2026-04-15T13:41:04.054430+0000 mgr.vm06.qbbldl (mgr.14229) 443 : cluster [DBG] pgmap v234: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:41:07.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:07 vm06 bash[28114]: cluster 2026-04-15T13:41:06.054859+0000 mgr.vm06.qbbldl (mgr.14229) 444 : cluster [DBG] pgmap v235: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:41:07.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:07 vm06 bash[28114]: cluster 2026-04-15T13:41:06.054859+0000 mgr.vm06.qbbldl (mgr.14229) 444 : cluster [DBG] pgmap v235: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:41:07.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:07 vm09 bash[34466]: cluster 2026-04-15T13:41:06.054859+0000 mgr.vm06.qbbldl (mgr.14229) 444 : cluster [DBG] pgmap v235: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:41:07.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:07 vm09 bash[34466]: cluster 2026-04-15T13:41:06.054859+0000 mgr.vm06.qbbldl (mgr.14229) 444 : cluster [DBG] pgmap v235: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:41:08.379 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop 2026-04-15T13:41:08.570 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:41:08.570 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (4m) 58s ago 5m 110M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:41:08.570 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 58s ago 5m - - 2026-04-15T13:41:08.570 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (5m) 2m ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3 2026-04-15T13:41:08.570 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (5m) 2m ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:41:08.828 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:41:08.828 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:41:08.828 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state 2026-04-15T13:41:09.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:09 vm06 bash[28114]: cluster 2026-04-15T13:41:08.055198+0000 mgr.vm06.qbbldl (mgr.14229) 445 : cluster [DBG] pgmap v236: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:41:09.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:09 vm06 bash[28114]: cluster 2026-04-15T13:41:08.055198+0000 mgr.vm06.qbbldl (mgr.14229) 445 : cluster [DBG] pgmap v236: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:41:09.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:09 vm06 bash[28114]: audit 2026-04-15T13:41:08.358837+0000 mgr.vm06.qbbldl (mgr.14229) 446 : audit [DBG] from='client.15460 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": 
["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:09.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:09 vm06 bash[28114]: audit 2026-04-15T13:41:08.358837+0000 mgr.vm06.qbbldl (mgr.14229) 446 : audit [DBG] from='client.15460 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:09.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:09 vm06 bash[28114]: audit 2026-04-15T13:41:08.486733+0000 mon.vm06 (mon.0) 1042 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:41:09.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:09 vm06 bash[28114]: audit 2026-04-15T13:41:08.486733+0000 mon.vm06 (mon.0) 1042 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:41:09.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:09 vm06 bash[28114]: audit 2026-04-15T13:41:08.825734+0000 mon.vm06 (mon.0) 1043 : audit [DBG] from='client.? 192.168.123.106:0/94380271' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:09.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:09 vm06 bash[28114]: audit 2026-04-15T13:41:08.825734+0000 mon.vm06 (mon.0) 1043 : audit [DBG] from='client.? 192.168.123.106:0/94380271' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:09 vm09 bash[34466]: cluster 2026-04-15T13:41:08.055198+0000 mgr.vm06.qbbldl (mgr.14229) 445 : cluster [DBG] pgmap v236: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:41:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:09 vm09 bash[34466]: cluster 2026-04-15T13:41:08.055198+0000 mgr.vm06.qbbldl (mgr.14229) 445 : cluster [DBG] pgmap v236: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:41:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:09 vm09 bash[34466]: audit 2026-04-15T13:41:08.358837+0000 mgr.vm06.qbbldl (mgr.14229) 446 : audit [DBG] from='client.15460 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:09 vm09 bash[34466]: audit 2026-04-15T13:41:08.358837+0000 mgr.vm06.qbbldl (mgr.14229) 446 : audit [DBG] from='client.15460 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:09 vm09 bash[34466]: audit 2026-04-15T13:41:08.486733+0000 mon.vm06 (mon.0) 1042 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:41:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:09 vm09 bash[34466]: audit 2026-04-15T13:41:08.486733+0000 mon.vm06 (mon.0) 1042 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:41:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:09 vm09 bash[34466]: audit 2026-04-15T13:41:08.825734+0000 mon.vm06 (mon.0) 1043 : audit [DBG] from='client.? 
192.168.123.106:0/94380271' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:09 vm09 bash[34466]: audit 2026-04-15T13:41:08.825734+0000 mon.vm06 (mon.0) 1043 : audit [DBG] from='client.? 192.168.123.106:0/94380271' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:10.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:10 vm06 bash[28114]: audit 2026-04-15T13:41:08.564596+0000 mgr.vm06.qbbldl (mgr.14229) 447 : audit [DBG] from='client.15464 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:10.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:10 vm06 bash[28114]: audit 2026-04-15T13:41:08.564596+0000 mgr.vm06.qbbldl (mgr.14229) 447 : audit [DBG] from='client.15464 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:10.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:10 vm09 bash[34466]: audit 2026-04-15T13:41:08.564596+0000 mgr.vm06.qbbldl (mgr.14229) 447 : audit [DBG] from='client.15464 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:10.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:10 vm09 bash[34466]: audit 2026-04-15T13:41:08.564596+0000 mgr.vm06.qbbldl (mgr.14229) 447 : audit [DBG] from='client.15464 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:11.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:11 vm06 bash[28114]: cluster 2026-04-15T13:41:10.055691+0000 mgr.vm06.qbbldl (mgr.14229) 448 : cluster [DBG] pgmap v237: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:41:11.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:11 vm06 bash[28114]: cluster 2026-04-15T13:41:10.055691+0000 mgr.vm06.qbbldl (mgr.14229) 448 : cluster [DBG] pgmap v237: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:41:11.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:11 vm06 bash[28114]: audit 2026-04-15T13:41:10.226189+0000 mon.vm06 (mon.0) 1044 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:41:11.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:11 vm06 bash[28114]: audit 2026-04-15T13:41:10.226189+0000 mon.vm06 (mon.0) 1044 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:41:11.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:11 vm09 bash[34466]: cluster 2026-04-15T13:41:10.055691+0000 mgr.vm06.qbbldl (mgr.14229) 448 : cluster [DBG] pgmap v237: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:41:11.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:11 vm09 bash[34466]: cluster 2026-04-15T13:41:10.055691+0000 mgr.vm06.qbbldl (mgr.14229) 448 : cluster [DBG] pgmap v237: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:41:11.608 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:11 vm09 bash[34466]: audit 2026-04-15T13:41:10.226189+0000 mon.vm06 (mon.0) 1044 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:41:11.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:11 vm09 bash[34466]: audit 2026-04-15T13:41:10.226189+0000 mon.vm06 (mon.0) 1044 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:41:13.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:13 vm06 bash[28114]: cluster 2026-04-15T13:41:12.056041+0000 mgr.vm06.qbbldl (mgr.14229) 449 : cluster [DBG] pgmap v238: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:41:13.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:13 vm06 bash[28114]: cluster 2026-04-15T13:41:12.056041+0000 mgr.vm06.qbbldl (mgr.14229) 449 : cluster [DBG] pgmap v238: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:41:13.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:13 vm09 bash[34466]: cluster 2026-04-15T13:41:12.056041+0000 mgr.vm06.qbbldl (mgr.14229) 449 : cluster [DBG] pgmap v238: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:41:13.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:13 vm09 bash[34466]: cluster 2026-04-15T13:41:12.056041+0000 mgr.vm06.qbbldl (mgr.14229) 449 : cluster [DBG] pgmap v238: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:41:14.062 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop 2026-04-15T13:41:14.260 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:41:14.260 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (4m) 64s ago 5m 110M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:41:14.260 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 64s ago 5m - - 2026-04-15T13:41:14.260 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (5m) 2m ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3 2026-04-15T13:41:14.260 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (5m) 2m ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:41:14.507 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:41:14.507 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:41:14.507 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state 2026-04-15T13:41:15.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:15 vm06 bash[28114]: audit 2026-04-15T13:41:14.039286+0000 mgr.vm06.qbbldl (mgr.14229) 450 : audit [DBG] from='client.15472 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:15.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:15 vm06 bash[28114]: audit 2026-04-15T13:41:14.039286+0000 mgr.vm06.qbbldl (mgr.14229) 450 : audit 
[DBG] from='client.15472 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:15.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:15 vm06 bash[28114]: cluster 2026-04-15T13:41:14.056467+0000 mgr.vm06.qbbldl (mgr.14229) 451 : cluster [DBG] pgmap v239: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:41:15.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:15 vm06 bash[28114]: cluster 2026-04-15T13:41:14.056467+0000 mgr.vm06.qbbldl (mgr.14229) 451 : cluster [DBG] pgmap v239: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:41:15.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:15 vm06 bash[28114]: audit 2026-04-15T13:41:14.254735+0000 mgr.vm06.qbbldl (mgr.14229) 452 : audit [DBG] from='client.15476 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:15.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:15 vm06 bash[28114]: audit 2026-04-15T13:41:14.254735+0000 mgr.vm06.qbbldl (mgr.14229) 452 : audit [DBG] from='client.15476 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:15.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:15 vm06 bash[28114]: audit 2026-04-15T13:41:14.504877+0000 mon.vm06 (mon.0) 1045 : audit [DBG] from='client.? 192.168.123.106:0/955564876' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:15.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:15 vm06 bash[28114]: audit 2026-04-15T13:41:14.504877+0000 mon.vm06 (mon.0) 1045 : audit [DBG] from='client.? 
192.168.123.106:0/955564876' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:15.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:15 vm09 bash[34466]: audit 2026-04-15T13:41:14.039286+0000 mgr.vm06.qbbldl (mgr.14229) 450 : audit [DBG] from='client.15472 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:15.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:15 vm09 bash[34466]: audit 2026-04-15T13:41:14.039286+0000 mgr.vm06.qbbldl (mgr.14229) 450 : audit [DBG] from='client.15472 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:15.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:15 vm09 bash[34466]: cluster 2026-04-15T13:41:14.056467+0000 mgr.vm06.qbbldl (mgr.14229) 451 : cluster [DBG] pgmap v239: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:41:15.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:15 vm09 bash[34466]: cluster 2026-04-15T13:41:14.056467+0000 mgr.vm06.qbbldl (mgr.14229) 451 : cluster [DBG] pgmap v239: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:41:15.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:15 vm09 bash[34466]: audit 2026-04-15T13:41:14.254735+0000 mgr.vm06.qbbldl (mgr.14229) 452 : audit [DBG] from='client.15476 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:15.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:15 vm09 bash[34466]: audit 2026-04-15T13:41:14.254735+0000 mgr.vm06.qbbldl (mgr.14229) 452 : audit [DBG] from='client.15476 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:15.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:15 vm09 bash[34466]: audit 2026-04-15T13:41:14.504877+0000 mon.vm06 (mon.0) 1045 : audit [DBG] from='client.? 192.168.123.106:0/955564876' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:15.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:15 vm09 bash[34466]: audit 2026-04-15T13:41:14.504877+0000 mon.vm06 (mon.0) 1045 : audit [DBG] from='client.? 
192.168.123.106:0/955564876' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:17.265 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:16 vm06 bash[28114]: audit 2026-04-15T13:41:15.780438+0000 mon.vm06 (mon.0) 1046 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:41:17.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:16 vm06 bash[28114]: audit 2026-04-15T13:41:15.780438+0000 mon.vm06 (mon.0) 1046 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:41:17.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:16 vm06 bash[28114]: audit 2026-04-15T13:41:15.785324+0000 mon.vm06 (mon.0) 1047 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:41:17.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:16 vm06 bash[28114]: audit 2026-04-15T13:41:15.785324+0000 mon.vm06 (mon.0) 1047 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:41:17.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:16 vm06 bash[28114]: cluster 2026-04-15T13:41:16.056957+0000 mgr.vm06.qbbldl (mgr.14229) 453 : cluster [DBG] pgmap v240: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:41:17.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:16 vm06 bash[28114]: cluster 2026-04-15T13:41:16.056957+0000 mgr.vm06.qbbldl (mgr.14229) 453 : cluster [DBG] pgmap v240: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:41:17.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:16 vm06 bash[28114]: audit 2026-04-15T13:41:16.133504+0000 mon.vm06 (mon.0) 1048 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:41:17.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:16 vm06 bash[28114]: audit 2026-04-15T13:41:16.133504+0000 mon.vm06 (mon.0) 1048 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:41:17.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:16 vm06 bash[28114]: audit 2026-04-15T13:41:16.134201+0000 mon.vm06 (mon.0) 1049 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:41:17.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:16 vm06 bash[28114]: audit 2026-04-15T13:41:16.134201+0000 mon.vm06 (mon.0) 1049 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:41:17.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:16 vm06 bash[28114]: cluster 2026-04-15T13:41:16.135367+0000 mgr.vm06.qbbldl (mgr.14229) 454 : cluster [DBG] pgmap v241: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 203 B/s rd, 406 B/s wr, 0 op/s 2026-04-15T13:41:17.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:16 vm06 bash[28114]: cluster 2026-04-15T13:41:16.135367+0000 mgr.vm06.qbbldl (mgr.14229) 454 : cluster [DBG] pgmap v241: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 203 B/s rd, 406 B/s wr, 0 op/s 
2026-04-15T13:41:17.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:16 vm06 bash[28114]: audit 2026-04-15T13:41:16.139436+0000 mon.vm06 (mon.0) 1050 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:17.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:16 vm06 bash[28114]: audit 2026-04-15T13:41:16.140855+0000 mon.vm06 (mon.0) 1051 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:41:17.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:16 vm09 bash[34466]: audit 2026-04-15T13:41:15.780438+0000 mon.vm06 (mon.0) 1046 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:17.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:16 vm09 bash[34466]: audit 2026-04-15T13:41:15.785324+0000 mon.vm06 (mon.0) 1047 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:17.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:16 vm09 bash[34466]: cluster 2026-04-15T13:41:16.056957+0000 mgr.vm06.qbbldl (mgr.14229) 453 : cluster [DBG] pgmap v240: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:41:17.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:16 vm09 bash[34466]: audit 2026-04-15T13:41:16.133504+0000 mon.vm06 (mon.0) 1048 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:41:17.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:16 vm09 bash[34466]: audit 2026-04-15T13:41:16.134201+0000 mon.vm06 (mon.0) 1049 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:41:17.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:16 vm09 bash[34466]: cluster 2026-04-15T13:41:16.135367+0000 mgr.vm06.qbbldl (mgr.14229) 454 : cluster [DBG] pgmap v241: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 203 B/s rd, 406 B/s wr, 0 op/s
2026-04-15T13:41:17.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:16 vm09 bash[34466]: audit 2026-04-15T13:41:16.139436+0000 mon.vm06 (mon.0) 1050 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:17.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:16 vm09 bash[34466]: audit 2026-04-15T13:41:16.140855+0000 mon.vm06 (mon.0) 1051 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:41:19.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:19 vm06 bash[28114]: cluster 2026-04-15T13:41:18.135911+0000 mgr.vm06.qbbldl (mgr.14229) 455 : cluster [DBG] pgmap v242: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 203 B/s rd, 406 B/s wr, 0 op/s
2026-04-15T13:41:19.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:19 vm09 bash[34466]: cluster 2026-04-15T13:41:18.135911+0000 mgr.vm06.qbbldl (mgr.14229) 455 : cluster [DBG] pgmap v242: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 203 B/s rd, 406 B/s wr, 0 op/s
2026-04-15T13:41:19.778 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:41:19.990 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:41:19.991 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (5m) 4s ago 5m 115M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:41:19.991 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 4s ago 5m - - 2026-04-15T13:41:19.991 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (5m) 2m ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3 2026-04-15T13:41:19.991 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (5m) 2m ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:41:20.243 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:41:20.243 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:41:20.243 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state 2026-04-15T13:41:21.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:21 vm06 bash[28114]: audit 2026-04-15T13:41:19.754923+0000 mgr.vm06.qbbldl (mgr.14229) 456 : audit [DBG] from='client.15484 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:21.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:21 vm06 bash[28114]: audit 2026-04-15T13:41:19.754923+0000 mgr.vm06.qbbldl (mgr.14229) 456 : audit [DBG] from='client.15484 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:21.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:21 vm06 bash[28114]: audit 2026-04-15T13:41:19.983309+0000 mgr.vm06.qbbldl (mgr.14229) 457 : audit [DBG] from='client.15488 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:21.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:21 vm06 bash[28114]: audit 2026-04-15T13:41:19.983309+0000 mgr.vm06.qbbldl (mgr.14229) 457 : audit [DBG] from='client.15488 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:21.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:21 vm06 bash[28114]: cluster 2026-04-15T13:41:20.136444+0000 mgr.vm06.qbbldl (mgr.14229) 458 : cluster [DBG] pgmap v243: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:41:21.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:21 vm06 bash[28114]: cluster 2026-04-15T13:41:20.136444+0000 mgr.vm06.qbbldl (mgr.14229) 458 : cluster [DBG] pgmap v243: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:41:21.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:21 vm06 bash[28114]: audit 2026-04-15T13:41:20.240337+0000 mon.vm06 (mon.0) 1052 : audit [DBG] from='client.? 192.168.123.106:0/2264537276' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:21.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:21 vm06 bash[28114]: audit 2026-04-15T13:41:20.240337+0000 mon.vm06 (mon.0) 1052 : audit [DBG] from='client.? 
192.168.123.106:0/2264537276' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:41:21.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:21 vm09 bash[34466]: audit 2026-04-15T13:41:19.754923+0000 mgr.vm06.qbbldl (mgr.14229) 456 : audit [DBG] from='client.15484 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:41:21.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:21 vm09 bash[34466]: audit 2026-04-15T13:41:19.983309+0000 mgr.vm06.qbbldl (mgr.14229) 457 : audit [DBG] from='client.15488 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:41:21.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:21 vm09 bash[34466]: cluster 2026-04-15T13:41:20.136444+0000 mgr.vm06.qbbldl (mgr.14229) 458 : cluster [DBG] pgmap v243: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:41:21.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:21 vm09 bash[34466]: audit 2026-04-15T13:41:20.240337+0000 mon.vm06 (mon.0) 1052 : audit [DBG] from='client.? 192.168.123.106:0/2264537276' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:41:23.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:23 vm06 bash[28114]: cluster 2026-04-15T13:41:22.136811+0000 mgr.vm06.qbbldl (mgr.14229) 459 : cluster [DBG] pgmap v244: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:41:23.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:23 vm09 bash[34466]: cluster 2026-04-15T13:41:22.136811+0000 mgr.vm06.qbbldl (mgr.14229) 459 : cluster [DBG] pgmap v244: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:41:24.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:24 vm06 bash[28114]: audit 2026-04-15T13:41:23.486961+0000 mon.vm06 (mon.0) 1053 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:41:24.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:24 vm09 bash[34466]: audit 2026-04-15T13:41:23.486961+0000 mon.vm06 (mon.0) 1053 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:41:25.459 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to stop
2026-04-15T13:41:25.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:25 vm06 bash[28114]: cluster 2026-04-15T13:41:24.137195+0000 mgr.vm06.qbbldl (mgr.14229) 460 : cluster [DBG] pgmap v245: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 101 B/s rd, 203 B/s wr, 0 op/s
2026-04-15T13:41:25.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:25 vm09 bash[34466]: cluster 2026-04-15T13:41:24.137195+0000 mgr.vm06.qbbldl (mgr.14229) 460 : cluster [DBG] pgmap v245: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 101 B/s rd, 203 B/s wr, 0 op/s
2026-04-15T13:41:25.661 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:41:25.661 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (5m) 9s ago 5m 115M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:41:25.661 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 9s ago 5m - -
2026-04-15T13:41:25.661 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (5m) 2m ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:41:25.661 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (5m) 2m ago 5m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:41:25.934 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:41:25.935 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:41:25.935 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:41:26.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:26 vm06 bash[28114]: audit 2026-04-15T13:41:25.931535+0000 mon.vm06 (mon.0) 1054 : audit [DBG] from='client.? 192.168.123.106:0/3318127291' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:41:26.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:26 vm09 bash[34466]: audit 2026-04-15T13:41:25.931535+0000 mon.vm06 (mon.0) 1054 : audit [DBG] from='client.? 192.168.123.106:0/3318127291' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:41:27.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:27 vm06 bash[28114]: audit 2026-04-15T13:41:25.439326+0000 mgr.vm06.qbbldl (mgr.14229) 461 : audit [DBG] from='client.15496 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:41:27.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:27 vm06 bash[28114]: audit 2026-04-15T13:41:25.656009+0000 mgr.vm06.qbbldl (mgr.14229) 462 : audit [DBG] from='client.15500 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:41:27.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:27 vm06 bash[28114]: cluster 2026-04-15T13:41:26.137765+0000 mgr.vm06.qbbldl (mgr.14229) 463 : cluster [DBG] pgmap v246: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 101 B/s rd, 203 B/s wr, 0 op/s
2026-04-15T13:41:27.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:27 vm09 bash[34466]: audit 2026-04-15T13:41:25.439326+0000 mgr.vm06.qbbldl (mgr.14229) 461 : audit [DBG] from='client.15496 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:41:27.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:27 vm09 bash[34466]: audit 2026-04-15T13:41:25.656009+0000 mgr.vm06.qbbldl (mgr.14229) 462 : audit [DBG] from='client.15500 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:41:27.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:27 vm09 bash[34466]: cluster 2026-04-15T13:41:26.137765+0000 mgr.vm06.qbbldl (mgr.14229) 463 : cluster [DBG] pgmap v246: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 101 B/s rd, 203 B/s wr, 0 op/s
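The repeated "orch ps" / "health detail" dispatches above are the test polling until rgw.foo.vm06.liyzhd reports stopped. A minimal sketch of that style of wait loop, reconstructed only from the commands visible in this log (the helper name and the 300-second timeout are illustrative assumptions, not values taken from this run):

    # Sketch only: poll 'ceph orch ps' until a daemon reports the desired
    # state, echoing the "Waiting for ... to stop" lines seen in this log.
    wait_daemon_state() {
        local daemon="$1" state="$2"   # e.g. rgw.foo.vm06.liyzhd stopped
        timeout 300 bash -c "
            while ! ceph orch ps | grep \"$daemon\" | grep -q \"$state\"; do
                echo \"Waiting for $daemon to reach $state\"
                ceph orch ps --daemon-type rgw   # the listing shown above
                ceph health detail               # surfaces CEPHADM_FAILED_DAEMON
                sleep 5
            done"
    }

    ceph orch daemon stop rgw.foo.vm06.liyzhd
    wait_daemon_state rgw.foo.vm06.liyzhd stopped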
2026-04-15T13:41:29.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:29 vm09 bash[34466]: cluster 2026-04-15T13:41:28.138215+0000 mgr.vm06.qbbldl (mgr.14229) 464 : cluster [DBG] pgmap v247: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:41:29.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:29 vm06 bash[28114]: cluster 2026-04-15T13:41:28.138215+0000 mgr.vm06.qbbldl (mgr.14229) 464 : cluster [DBG] pgmap v247: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:41:30.927 INFO:teuthology.orchestra.run.vm06.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-15T13:41:30.927 INFO:teuthology.orchestra.run.vm06.stderr: Dload Upload Total Spent Left Speed
2026-04-15T13:41:30.928 INFO:teuthology.orchestra.run.vm06.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k
2026-04-15T13:41:31.135 INFO:teuthology.orchestra.run.vm06.stdout:anonymousScheduled to start rgw.foo.vm06.liyzhd on host 'vm06'
2026-04-15T13:41:31.359 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to start
2026-04-15T13:41:31.583 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:41:31.583 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (5m) 15s ago 6m 115M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:41:31.583 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 15s ago 6m - -
2026-04-15T13:41:31.583 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (6m) 2m ago 6m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:41:31.583 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (6m) 2m ago 6m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:41:31.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:31 vm09 bash[34466]: cluster 2026-04-15T13:41:30.138668+0000 mgr.vm06.qbbldl (mgr.14229) 465 : cluster [DBG] pgmap v248: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
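The curl transfer meter on stderr above is the availability probe run between daemon restarts; the stray "anonymous" fused onto "Scheduled to start" appears to be the tail of the 187-byte HTTP response body (the anonymous owner in the S3 listing) printed without a trailing newline. A sketch of that probe loop; the virtual IP and port below are placeholders, not this run's actual values:

    # Sketch only: retry until the RGW behind the ingress answers.
    VIP=192.0.2.10   # placeholder virtual IP
    PORT=9000        # placeholder ingress frontend port
    timeout 300 bash -c "
        while ! curl http://$VIP:$PORT/ ; do
            echo 'Waiting for http://$VIP:$PORT/ to be available'
            sleep 1
        done"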
2026-04-15T13:41:31.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:31 vm09 bash[34466]: audit 2026-04-15T13:41:31.126411+0000 mon.vm06 (mon.0) 1055 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:31.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:31 vm09 bash[34466]: audit 2026-04-15T13:41:31.131889+0000 mon.vm06 (mon.0) 1056 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:31.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:31 vm09 bash[34466]: audit 2026-04-15T13:41:31.132844+0000 mon.vm06 (mon.0) 1057 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:41:31.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:31 vm06 bash[28114]: cluster 2026-04-15T13:41:30.138668+0000 mgr.vm06.qbbldl (mgr.14229) 465 : cluster [DBG] pgmap v248: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:41:31.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:31 vm06 bash[28114]: audit 2026-04-15T13:41:31.126411+0000 mon.vm06 (mon.0) 1055 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:31.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:31 vm06 bash[28114]: audit 2026-04-15T13:41:31.131889+0000 mon.vm06 (mon.0) 1056 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:31.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:31 vm06 bash[28114]: audit 2026-04-15T13:41:31.132844+0000 mon.vm06 (mon.0) 1057 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:41:31.818 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:41:31.818 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:41:31.818 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:41:32.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:32 vm09 bash[34466]: audit 2026-04-15T13:41:31.118792+0000 mgr.vm06.qbbldl (mgr.14229) 466 : audit [DBG] from='client.15508 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm06.liyzhd", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:41:32.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:32 vm09 bash[34466]: cephadm 2026-04-15T13:41:31.119141+0000 mgr.vm06.qbbldl (mgr.14229) 467 : cephadm [INF] Schedule start daemon rgw.foo.vm06.liyzhd
2026-04-15T13:41:32.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:32 vm09 bash[34466]: audit 2026-04-15T13:41:31.338534+0000 mgr.vm06.qbbldl (mgr.14229) 468 : audit [DBG] from='client.15512 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:41:32.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:32 vm09 bash[34466]: audit 2026-04-15T13:41:31.815501+0000 mon.vm06 (mon.0) 1058 : audit [DBG] from='client.? 192.168.123.106:0/4061526680' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:41:32.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:32 vm06 bash[28114]: audit 2026-04-15T13:41:31.118792+0000 mgr.vm06.qbbldl (mgr.14229) 466 : audit [DBG] from='client.15508 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm06.liyzhd", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:41:32.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:32 vm06 bash[28114]: cephadm 2026-04-15T13:41:31.119141+0000 mgr.vm06.qbbldl (mgr.14229) 467 : cephadm [INF] Schedule start daemon rgw.foo.vm06.liyzhd
2026-04-15T13:41:32.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:32 vm06 bash[28114]: audit 2026-04-15T13:41:31.338534+0000 mgr.vm06.qbbldl (mgr.14229) 468 : audit [DBG] from='client.15512 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:41:32.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:32 vm06 bash[28114]: audit 2026-04-15T13:41:31.815501+0000 mon.vm06 (mon.0) 1058 : audit [DBG] from='client.? 192.168.123.106:0/4061526680' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:41:33.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:33 vm09 bash[34466]: audit 2026-04-15T13:41:31.575132+0000 mgr.vm06.qbbldl (mgr.14229) 469 : audit [DBG] from='client.15516 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:41:33.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:33 vm09 bash[34466]: cluster 2026-04-15T13:41:32.139170+0000 mgr.vm06.qbbldl (mgr.14229) 470 : cluster [DBG] pgmap v249: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:41:33.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:33 vm06 bash[28114]: audit 2026-04-15T13:41:31.575132+0000 mgr.vm06.qbbldl (mgr.14229) 469 : audit [DBG] from='client.15516 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:41:33.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:33 vm06 bash[28114]: cluster 2026-04-15T13:41:32.139170+0000 mgr.vm06.qbbldl (mgr.14229) 470 : cluster [DBG] pgmap v249: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:41:35.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:35 vm09 bash[34466]: cluster 2026-04-15T13:41:34.139669+0000 mgr.vm06.qbbldl (mgr.14229) 471 : cluster [DBG] pgmap v250: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:41:35.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:35 vm06 bash[28114]: cluster 2026-04-15T13:41:34.139669+0000 mgr.vm06.qbbldl (mgr.14229) 471 : cluster [DBG] pgmap v250: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:41:37.038 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to start
2026-04-15T13:41:37.246 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:41:37.256 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (5m) 0s ago 6m 116M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:41:37.256 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 error 0s ago 6m - -
2026-04-15T13:41:37.257 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (6m) 2m ago 6m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:41:37.257 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (6m) 2m ago 6m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:41:37.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:37 vm06 bash[28114]: cluster 2026-04-15T13:41:36.140105+0000 mgr.vm06.qbbldl (mgr.14229) 472 : cluster [DBG] pgmap v251: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:41:37.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:37 vm06 bash[28114]: audit 2026-04-15T13:41:36.560091+0000 mon.vm06 (mon.0) 1059 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:37.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:37 vm06 bash[28114]: audit 2026-04-15T13:41:36.564761+0000 mon.vm06 (mon.0) 1060 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:37.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:37 vm06 bash[28114]: audit 2026-04-15T13:41:36.565481+0000 mon.vm06 (mon.0) 1061 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:41:37.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:37 vm06 bash[28114]: audit 2026-04-15T13:41:36.565979+0000 mon.vm06 (mon.0) 1062 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:41:37.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:37 vm06 bash[28114]: audit 2026-04-15T13:41:36.569705+0000 mon.vm06 (mon.0) 1063 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:37.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:37 vm06 bash[28114]: audit 2026-04-15T13:41:36.570933+0000 mon.vm06 (mon.0) 1064 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:41:37.569 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:41:37.569 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:41:37.569 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:41:37.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:37 vm09 bash[34466]: cluster 2026-04-15T13:41:36.140105+0000 mgr.vm06.qbbldl (mgr.14229) 472 : cluster [DBG] pgmap v251: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:41:37.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:37 vm09 bash[34466]: audit 2026-04-15T13:41:36.560091+0000 mon.vm06 (mon.0) 1059 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:37.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:37 vm09 bash[34466]: audit 2026-04-15T13:41:36.564761+0000 mon.vm06 (mon.0) 1060 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:37.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:37 vm09 bash[34466]: audit 2026-04-15T13:41:36.565481+0000 mon.vm06 (mon.0) 1061 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:41:37.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:37 vm09 bash[34466]: audit 2026-04-15T13:41:36.565979+0000 mon.vm06 (mon.0) 1062 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:41:37.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:37 vm09 bash[34466]: audit 2026-04-15T13:41:36.569705+0000 mon.vm06 (mon.0) 1063 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:37.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:37 vm09 bash[34466]: audit 2026-04-15T13:41:36.570933+0000 mon.vm06 (mon.0) 1064 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:41:38.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:38 vm09 bash[34466]: cluster 2026-04-15T13:41:36.566920+0000 mgr.vm06.qbbldl (mgr.14229) 473 : cluster [DBG] pgmap v252: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:41:38.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:38 vm09 bash[34466]: cluster 2026-04-15T13:41:36.567002+0000 mgr.vm06.qbbldl (mgr.14229) 474 : cluster [DBG] pgmap v253: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:41:38.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:38 vm09 bash[34466]: audit 2026-04-15T13:41:37.011795+0000 mgr.vm06.qbbldl (mgr.14229) 475 : audit [DBG] from='client.15524 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:41:38.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:38 vm09 bash[34466]: audit 2026-04-15T13:41:37.240754+0000 mgr.vm06.qbbldl (mgr.14229) 476 : audit [DBG] from='client.15528 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:41:38.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:38 vm09 bash[34466]: audit 2026-04-15T13:41:37.318736+0000 mon.vm06 (mon.0) 1065 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:38.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:38 vm09 bash[34466]: audit 2026-04-15T13:41:37.342275+0000 mon.vm06 (mon.0) 1066 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:38.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:38 vm09 bash[34466]: audit 2026-04-15T13:41:37.344828+0000 mon.vm06 (mon.0) 1067 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:41:38.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:38 vm09 bash[34466]: audit 2026-04-15T13:41:37.563666+0000 mon.vm06 (mon.0) 1068 : audit [DBG] from='client.? 192.168.123.106:0/3451210932' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:41:38.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:38 vm06 bash[28114]: cluster 2026-04-15T13:41:36.566920+0000 mgr.vm06.qbbldl (mgr.14229) 473 : cluster [DBG] pgmap v252: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:41:38.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:38 vm06 bash[28114]: cluster 2026-04-15T13:41:36.567002+0000 mgr.vm06.qbbldl (mgr.14229) 474 : cluster [DBG] pgmap v253: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:41:38.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:38 vm06 bash[28114]: audit 2026-04-15T13:41:37.011795+0000 mgr.vm06.qbbldl (mgr.14229) 475 : audit [DBG] from='client.15524 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:41:38.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:38 vm06 bash[28114]: audit 2026-04-15T13:41:37.240754+0000 mgr.vm06.qbbldl (mgr.14229) 476 : audit [DBG] from='client.15528 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:41:38.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:38 vm06 bash[28114]: audit 2026-04-15T13:41:37.318736+0000 mon.vm06 (mon.0) 1065 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:38.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:38 vm06 bash[28114]: audit 2026-04-15T13:41:37.342275+0000 mon.vm06 (mon.0) 1066 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:38.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:38 vm06 bash[28114]: audit 2026-04-15T13:41:37.344828+0000 mon.vm06 (mon.0) 1067 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:41:38.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:38 vm06 bash[28114]: audit 2026-04-15T13:41:37.563666+0000 mon.vm06 (mon.0) 1068 : audit [DBG] from='client.? 192.168.123.106:0/3451210932' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:41:39.753 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:39 vm09 bash[34466]: audit 2026-04-15T13:41:38.487300+0000 mon.vm06 (mon.0) 1069 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:41:39.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:39 vm06 bash[28114]: audit 2026-04-15T13:41:38.487300+0000 mon.vm06 (mon.0) 1069 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:41:40.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:40 vm06 bash[28114]: cluster 2026-04-15T13:41:38.567327+0000 mgr.vm06.qbbldl (mgr.14229) 477 : cluster [DBG] pgmap v254: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 0 B/s wr, 50 op/s
2026-04-15T13:41:40.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:40 vm09 bash[34466]: cluster 2026-04-15T13:41:38.567327+0000 mgr.vm06.qbbldl (mgr.14229) 477 : cluster [DBG] pgmap v254: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 31 KiB/s rd, 0 B/s wr, 50 op/s
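Note that the "orch daemon start" issued above is asynchronous: the mgr only records "Schedule start daemon" (entry 467), so the daemon still shows error and the CEPHADM_FAILED_DAEMON warning persists through several refresh cycles before clearing further down. A sketch of waiting for the warning itself to clear; the helper name and timeout are illustrative, not this run's script:

    # Sketch only: block until a named health check no longer appears in
    # 'ceph health detail', as CEPHADM_FAILED_DAEMON eventually does below.
    wait_health_clear() {
        local check="$1"
        timeout 300 bash -c "
            while ceph health detail | grep -q \"$check\"; do
                echo \"Waiting for $check to clear\"
                sleep 5
            done"
    }

    wait_health_clear CEPHADM_FAILED_DAEMON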
2026-04-15T13:41:42.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:42 vm06 bash[28114]: cluster 2026-04-15T13:41:40.567845+0000 mgr.vm06.qbbldl (mgr.14229) 478 : cluster [DBG] pgmap v255: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 78 KiB/s rd, 485 B/s wr, 126 op/s
2026-04-15T13:41:42.853 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm06.liyzhd to start
2026-04-15T13:41:42.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:42 vm09 bash[34466]: cluster 2026-04-15T13:41:40.567845+0000 mgr.vm06.qbbldl (mgr.14229) 478 : cluster [DBG] pgmap v255: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 78 KiB/s rd, 485 B/s wr, 126 op/s
2026-04-15T13:41:43.049 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:41:43.049 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (5m) 0s ago 6m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:41:43.049 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (5s) 0s ago 6m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:41:43.049 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (6m) 2m ago 6m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3
2026-04-15T13:41:43.049 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (6m) 2m ago 6m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:41:43.308 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:41:43.308 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:41:43.308 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm06.liyzhd on vm06 is in error state
2026-04-15T13:41:44.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:43 vm06 bash[28114]: cluster 2026-04-15T13:41:42.568337+0000 mgr.vm06.qbbldl (mgr.14229) 479 : cluster [DBG] pgmap v256: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 93 KiB/s rd, 485 B/s wr, 152 op/s
2026-04-15T13:41:44.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:43 vm06 bash[28114]: audit 2026-04-15T13:41:42.827250+0000 mgr.vm06.qbbldl (mgr.14229) 480 : audit [DBG] from='client.15546 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:41:44.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:43 vm06 bash[28114]: audit 2026-04-15T13:41:42.885954+0000 mon.vm06 (mon.0) 1070 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:44.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:43 vm06 bash[28114]: audit 2026-04-15T13:41:42.890731+0000 mon.vm06 (mon.0) 1071 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:44.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:43 vm06 bash[28114]: audit 2026-04-15T13:41:42.891546+0000 mon.vm06 (mon.0) 1072 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:41:44.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:43 vm06 bash[28114]: audit 2026-04-15T13:41:42.892180+0000 mon.vm06 (mon.0) 1073 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:41:44.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:43 vm06 bash[28114]: cluster 2026-04-15T13:41:42.893182+0000 mgr.vm06.qbbldl (mgr.14229) 481 : cluster [DBG] pgmap v257: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 116 KiB/s rd, 606 B/s wr, 189 op/s
2026-04-15T13:41:44.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:43 vm06 bash[28114]: audit 2026-04-15T13:41:42.896038+0000 mon.vm06 (mon.0) 1074 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:44.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:43 vm06 bash[28114]: audit 2026-04-15T13:41:42.897689+0000 mon.vm06 (mon.0) 1075 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:41:44.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:43 vm06 bash[28114]: audit 2026-04-15T13:41:43.043992+0000 mgr.vm06.qbbldl (mgr.14229) 482 : audit [DBG] from='client.15550 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:41:44.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:43 vm06 bash[28114]: audit 2026-04-15T13:41:43.305271+0000 mon.vm06 (mon.0) 1076 : audit [DBG] from='client.? 192.168.123.106:0/1767154847' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:41:44.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:43 vm06 bash[28114]: cluster 2026-04-15T13:41:43.387661+0000 mon.vm06 (mon.0) 1077 : cluster [INF] Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-04-15T13:41:44.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:43 vm06 bash[28114]: cluster 2026-04-15T13:41:43.387688+0000 mon.vm06 (mon.0) 1078 : cluster [INF] Cluster is now healthy
2026-04-15T13:41:44.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:43 vm09 bash[34466]: cluster 2026-04-15T13:41:42.568337+0000 mgr.vm06.qbbldl (mgr.14229) 479 : cluster [DBG] pgmap v256: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 93 KiB/s rd, 485 B/s wr, 152 op/s
2026-04-15T13:41:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:43 vm09 bash[34466]: audit 2026-04-15T13:41:42.827250+0000 mgr.vm06.qbbldl (mgr.14229) 480 : audit [DBG] from='client.15546 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:41:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:43 vm09 bash[34466]: audit 2026-04-15T13:41:42.885954+0000 mon.vm06 (mon.0) 1070 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:43 vm09 bash[34466]: audit 2026-04-15T13:41:42.890731+0000 mon.vm06 (mon.0) 1071 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:43 vm09 bash[34466]: audit 2026-04-15T13:41:42.891546+0000 mon.vm06 (mon.0) 1072 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:41:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:43 vm09 bash[34466]: audit 2026-04-15T13:41:42.892180+0000 mon.vm06 (mon.0) 1073 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:41:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:43 vm09 bash[34466]: cluster 2026-04-15T13:41:42.893182+0000 mgr.vm06.qbbldl (mgr.14229) 481 : cluster [DBG] pgmap v257: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 116 KiB/s rd, 606 B/s wr, 189 op/s
2026-04-15T13:41:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:43 vm09 bash[34466]: audit 2026-04-15T13:41:42.896038+0000 mon.vm06 (mon.0) 1074 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:41:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:43 vm09 bash[34466]: audit 2026-04-15T13:41:42.897689+0000 mon.vm06 (mon.0) 1075 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:41:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:43 vm09 bash[34466]: audit 2026-04-15T13:41:43.043992+0000 mgr.vm06.qbbldl (mgr.14229) 482 : audit [DBG] from='client.15550 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:41:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:43 vm09 bash[34466]: audit 2026-04-15T13:41:43.305271+0000 mon.vm06 (mon.0) 1076 : audit [DBG] from='client.? 192.168.123.106:0/1767154847' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:41:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:43 vm09 bash[34466]: cluster 2026-04-15T13:41:43.387661+0000 mon.vm06 (mon.0) 1077 : cluster [INF] Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-04-15T13:41:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:43 vm09 bash[34466]: cluster 2026-04-15T13:41:43.387688+0000 mon.vm06 (mon.0) 1078 : cluster [INF] Cluster is now healthy
2026-04-15T13:41:46.265 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:45 vm06 bash[28114]: cluster 2026-04-15T13:41:44.893645+0000 mgr.vm06.qbbldl (mgr.14229) 483 : cluster [DBG] pgmap v258: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 96 KiB/s rd, 491 B/s wr, 157 op/s
2026-04-15T13:41:46.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:45 vm09 bash[34466]: cluster 2026-04-15T13:41:44.893645+0000 mgr.vm06.qbbldl (mgr.14229) 483 : cluster [DBG] pgmap v258: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 96 KiB/s rd, 491 B/s wr, 157 op/s
2026-04-15T13:41:48.265 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:47 vm06 bash[28114]: cluster 2026-04-15T13:41:46.894087+0000 mgr.vm06.qbbldl (mgr.14229) 484 : cluster [DBG] pgmap v259: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 77 KiB/s rd, 396 B/s wr, 127 op/s
2026-04-15T13:41:48.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:47 vm09 bash[34466]: cluster 2026-04-15T13:41:46.894087+0000 mgr.vm06.qbbldl (mgr.14229) 484 : cluster [DBG] pgmap v259: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 77 KiB/s rd, 396 B/s wr, 127 op/s
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:47 vm09 bash[34466]: cluster 2026-04-15T13:41:46.894087+0000 mgr.vm06.qbbldl (mgr.14229) 484 : cluster [DBG] pgmap v259: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 77 KiB/s rd, 396 B/s wr, 127 op/s 2026-04-15T13:41:48.539 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (11s) 5s ago 6m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:41:48.751 INFO:teuthology.orchestra.run.vm06.stdout:Scheduled to stop rgw.foo.vm09.iwshxg on host 'vm09' 2026-04-15T13:41:48.977 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop 2026-04-15T13:41:49.183 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:41:49.183 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (5m) 6s ago 6m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:41:49.183 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (11s) 6s ago 6m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:41:49.183 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (6m) 2m ago 6m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3 2026-04-15T13:41:49.183 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (6m) 2m ago 6m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:41:49.442 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:48.517635+0000 mgr.vm06.qbbldl (mgr.14229) 485 : audit [DBG] from='client.15558 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:48.517635+0000 mgr.vm06.qbbldl (mgr.14229) 485 : audit [DBG] from='client.15558 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:48.735093+0000 mgr.vm06.qbbldl (mgr.14229) 486 : audit [DBG] from='client.15562 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm09.iwshxg", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:48.735093+0000 mgr.vm06.qbbldl (mgr.14229) 486 : audit [DBG] from='client.15562 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm09.iwshxg", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: cephadm 2026-04-15T13:41:48.735734+0000 mgr.vm06.qbbldl (mgr.14229) 487 : cephadm [INF] Schedule stop daemon rgw.foo.vm09.iwshxg 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: cephadm 2026-04-15T13:41:48.735734+0000 mgr.vm06.qbbldl (mgr.14229) 487 : cephadm [INF] Schedule stop daemon rgw.foo.vm09.iwshxg 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:48.742103+0000 mon.vm06 (mon.0) 1079 : audit [INF] from='mgr.14229 
192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:48.742103+0000 mon.vm06 (mon.0) 1079 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:48.747871+0000 mon.vm06 (mon.0) 1080 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:48.747871+0000 mon.vm06 (mon.0) 1080 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:48.748937+0000 mon.vm06 (mon.0) 1081 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:48.748937+0000 mon.vm06 (mon.0) 1081 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:48.750252+0000 mon.vm06 (mon.0) 1082 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:48.750252+0000 mon.vm06 (mon.0) 1082 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:48.750772+0000 mon.vm06 (mon.0) 1083 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:48.750772+0000 mon.vm06 (mon.0) 1083 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:48.755402+0000 mon.vm06 (mon.0) 1084 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:48.755402+0000 mon.vm06 (mon.0) 1084 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:48.757351+0000 mon.vm06 (mon.0) 1085 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 
2026-04-15T13:41:48.757351+0000 mon.vm06 (mon.0) 1085 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: cluster 2026-04-15T13:41:48.894551+0000 mgr.vm06.qbbldl (mgr.14229) 488 : cluster [DBG] pgmap v260: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 52 KiB/s rd, 396 B/s wr, 85 op/s 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: cluster 2026-04-15T13:41:48.894551+0000 mgr.vm06.qbbldl (mgr.14229) 488 : cluster [DBG] pgmap v260: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 52 KiB/s rd, 396 B/s wr, 85 op/s 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:48.955520+0000 mgr.vm06.qbbldl (mgr.14229) 489 : audit [DBG] from='client.15566 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:48.955520+0000 mgr.vm06.qbbldl (mgr.14229) 489 : audit [DBG] from='client.15566 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:49.177815+0000 mgr.vm06.qbbldl (mgr.14229) 490 : audit [DBG] from='client.15570 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:49.177815+0000 mgr.vm06.qbbldl (mgr.14229) 490 : audit [DBG] from='client.15570 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:49.439687+0000 mon.vm06 (mon.0) 1086 : audit [DBG] from='client.? 192.168.123.106:0/1252221225' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:49 vm06 bash[28114]: audit 2026-04-15T13:41:49.439687+0000 mon.vm06 (mon.0) 1086 : audit [DBG] from='client.? 
192.168.123.106:0/1252221225' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:48.517635+0000 mgr.vm06.qbbldl (mgr.14229) 485 : audit [DBG] from='client.15558 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:48.517635+0000 mgr.vm06.qbbldl (mgr.14229) 485 : audit [DBG] from='client.15558 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:48.735093+0000 mgr.vm06.qbbldl (mgr.14229) 486 : audit [DBG] from='client.15562 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm09.iwshxg", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:48.735093+0000 mgr.vm06.qbbldl (mgr.14229) 486 : audit [DBG] from='client.15562 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm09.iwshxg", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: cephadm 2026-04-15T13:41:48.735734+0000 mgr.vm06.qbbldl (mgr.14229) 487 : cephadm [INF] Schedule stop daemon rgw.foo.vm09.iwshxg 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: cephadm 2026-04-15T13:41:48.735734+0000 mgr.vm06.qbbldl (mgr.14229) 487 : cephadm [INF] Schedule stop daemon rgw.foo.vm09.iwshxg 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:48.742103+0000 mon.vm06 (mon.0) 1079 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:48.742103+0000 mon.vm06 (mon.0) 1079 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:48.747871+0000 mon.vm06 (mon.0) 1080 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:48.747871+0000 mon.vm06 (mon.0) 1080 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:48.748937+0000 mon.vm06 (mon.0) 1081 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:48.748937+0000 mon.vm06 (mon.0) 1081 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 
2026-04-15T13:41:48.750252+0000 mon.vm06 (mon.0) 1082 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:48.750252+0000 mon.vm06 (mon.0) 1082 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:48.750772+0000 mon.vm06 (mon.0) 1083 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:48.750772+0000 mon.vm06 (mon.0) 1083 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:48.755402+0000 mon.vm06 (mon.0) 1084 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:48.755402+0000 mon.vm06 (mon.0) 1084 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:48.757351+0000 mon.vm06 (mon.0) 1085 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:48.757351+0000 mon.vm06 (mon.0) 1085 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:41:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: cluster 2026-04-15T13:41:48.894551+0000 mgr.vm06.qbbldl (mgr.14229) 488 : cluster [DBG] pgmap v260: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 52 KiB/s rd, 396 B/s wr, 85 op/s 2026-04-15T13:41:50.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: cluster 2026-04-15T13:41:48.894551+0000 mgr.vm06.qbbldl (mgr.14229) 488 : cluster [DBG] pgmap v260: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 52 KiB/s rd, 396 B/s wr, 85 op/s 2026-04-15T13:41:50.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:48.955520+0000 mgr.vm06.qbbldl (mgr.14229) 489 : audit [DBG] from='client.15566 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:50.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:48.955520+0000 mgr.vm06.qbbldl (mgr.14229) 489 : audit [DBG] from='client.15566 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:50.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 
vm09 bash[34466]: audit 2026-04-15T13:41:49.177815+0000 mgr.vm06.qbbldl (mgr.14229) 490 : audit [DBG] from='client.15570 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:50.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:49.177815+0000 mgr.vm06.qbbldl (mgr.14229) 490 : audit [DBG] from='client.15570 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:50.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:49.439687+0000 mon.vm06 (mon.0) 1086 : audit [DBG] from='client.? 192.168.123.106:0/1252221225' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:50.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:49 vm09 bash[34466]: audit 2026-04-15T13:41:49.439687+0000 mon.vm06 (mon.0) 1086 : audit [DBG] from='client.? 192.168.123.106:0/1252221225' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:52.265 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:51 vm06 bash[28114]: cluster 2026-04-15T13:41:50.895029+0000 mgr.vm06.qbbldl (mgr.14229) 491 : cluster [DBG] pgmap v261: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 14 KiB/s rd, 0 B/s wr, 23 op/s 2026-04-15T13:41:52.265 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:51 vm06 bash[28114]: cluster 2026-04-15T13:41:50.895029+0000 mgr.vm06.qbbldl (mgr.14229) 491 : cluster [DBG] pgmap v261: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 14 KiB/s rd, 0 B/s wr, 23 op/s 2026-04-15T13:41:52.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:51 vm09 bash[34466]: cluster 2026-04-15T13:41:50.895029+0000 mgr.vm06.qbbldl (mgr.14229) 491 : cluster [DBG] pgmap v261: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 14 KiB/s rd, 0 B/s wr, 23 op/s 2026-04-15T13:41:52.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:51 vm09 bash[34466]: cluster 2026-04-15T13:41:50.895029+0000 mgr.vm06.qbbldl (mgr.14229) 491 : cluster [DBG] pgmap v261: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 14 KiB/s rd, 0 B/s wr, 23 op/s 2026-04-15T13:41:54.265 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:53 vm06 bash[28114]: cluster 2026-04-15T13:41:52.895438+0000 mgr.vm06.qbbldl (mgr.14229) 492 : cluster [DBG] pgmap v262: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 0 B/s wr, 2 op/s 2026-04-15T13:41:54.265 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:53 vm06 bash[28114]: cluster 2026-04-15T13:41:52.895438+0000 mgr.vm06.qbbldl (mgr.14229) 492 : cluster [DBG] pgmap v262: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 0 B/s wr, 2 op/s 2026-04-15T13:41:54.265 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:53 vm06 bash[28114]: audit 2026-04-15T13:41:53.491754+0000 mon.vm06 (mon.0) 1087 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:41:54.265 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:53 vm06 bash[28114]: audit 2026-04-15T13:41:53.491754+0000 mon.vm06 (mon.0) 1087 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:41:54.266 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:53 vm06 bash[28114]: audit 2026-04-15T13:41:53.492236+0000 mon.vm06 (mon.0) 1088 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:41:54.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:53 vm06 bash[28114]: audit 2026-04-15T13:41:53.492236+0000 mon.vm06 (mon.0) 1088 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:41:54.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:53 vm09 bash[34466]: cluster 2026-04-15T13:41:52.895438+0000 mgr.vm06.qbbldl (mgr.14229) 492 : cluster [DBG] pgmap v262: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 0 B/s wr, 2 op/s 2026-04-15T13:41:54.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:53 vm09 bash[34466]: cluster 2026-04-15T13:41:52.895438+0000 mgr.vm06.qbbldl (mgr.14229) 492 : cluster [DBG] pgmap v262: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 0 B/s wr, 2 op/s 2026-04-15T13:41:54.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:53 vm09 bash[34466]: audit 2026-04-15T13:41:53.491754+0000 mon.vm06 (mon.0) 1087 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:41:54.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:53 vm09 bash[34466]: audit 2026-04-15T13:41:53.491754+0000 mon.vm06 (mon.0) 1087 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:41:54.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:53 vm09 bash[34466]: audit 2026-04-15T13:41:53.492236+0000 mon.vm06 (mon.0) 1088 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:41:54.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:53 vm09 bash[34466]: audit 2026-04-15T13:41:53.492236+0000 mon.vm06 (mon.0) 1088 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:41:54.681 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop 2026-04-15T13:41:54.888 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:41:54.888 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (5m) 12s ago 6m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:41:54.888 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (17s) 12s ago 6m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:41:54.888 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (6m) 2m ago 6m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3 2026-04-15T13:41:54.888 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (6m) 2m ago 6m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:41:55.146 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK 2026-04-15T13:41:56.265 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:55 vm06 bash[28114]: audit 2026-04-15T13:41:54.658924+0000 mgr.vm06.qbbldl (mgr.14229) 493 : audit [DBG] 
from='client.15578 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:56.265 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:55 vm06 bash[28114]: audit 2026-04-15T13:41:54.658924+0000 mgr.vm06.qbbldl (mgr.14229) 493 : audit [DBG] from='client.15578 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:56.265 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:55 vm06 bash[28114]: audit 2026-04-15T13:41:54.881972+0000 mgr.vm06.qbbldl (mgr.14229) 494 : audit [DBG] from='client.15582 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:56.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:55 vm06 bash[28114]: audit 2026-04-15T13:41:54.881972+0000 mgr.vm06.qbbldl (mgr.14229) 494 : audit [DBG] from='client.15582 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:56.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:55 vm06 bash[28114]: cluster 2026-04-15T13:41:54.895843+0000 mgr.vm06.qbbldl (mgr.14229) 495 : cluster [DBG] pgmap v263: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 341 B/s wr, 2 op/s 2026-04-15T13:41:56.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:55 vm06 bash[28114]: cluster 2026-04-15T13:41:54.895843+0000 mgr.vm06.qbbldl (mgr.14229) 495 : cluster [DBG] pgmap v263: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 341 B/s wr, 2 op/s 2026-04-15T13:41:56.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:55 vm06 bash[28114]: audit 2026-04-15T13:41:55.143136+0000 mon.vm06 (mon.0) 1089 : audit [DBG] from='client.? 192.168.123.106:0/1720318683' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:56.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:55 vm06 bash[28114]: audit 2026-04-15T13:41:55.143136+0000 mon.vm06 (mon.0) 1089 : audit [DBG] from='client.? 
192.168.123.106:0/1720318683' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:56.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:55 vm09 bash[34466]: audit 2026-04-15T13:41:54.658924+0000 mgr.vm06.qbbldl (mgr.14229) 493 : audit [DBG] from='client.15578 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:56.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:55 vm09 bash[34466]: audit 2026-04-15T13:41:54.658924+0000 mgr.vm06.qbbldl (mgr.14229) 493 : audit [DBG] from='client.15578 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:56.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:55 vm09 bash[34466]: audit 2026-04-15T13:41:54.881972+0000 mgr.vm06.qbbldl (mgr.14229) 494 : audit [DBG] from='client.15582 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:56.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:55 vm09 bash[34466]: audit 2026-04-15T13:41:54.881972+0000 mgr.vm06.qbbldl (mgr.14229) 494 : audit [DBG] from='client.15582 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:41:56.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:55 vm09 bash[34466]: cluster 2026-04-15T13:41:54.895843+0000 mgr.vm06.qbbldl (mgr.14229) 495 : cluster [DBG] pgmap v263: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 341 B/s wr, 2 op/s 2026-04-15T13:41:56.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:55 vm09 bash[34466]: cluster 2026-04-15T13:41:54.895843+0000 mgr.vm06.qbbldl (mgr.14229) 495 : cluster [DBG] pgmap v263: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 1.7 KiB/s rd, 341 B/s wr, 2 op/s 2026-04-15T13:41:56.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:55 vm09 bash[34466]: audit 2026-04-15T13:41:55.143136+0000 mon.vm06 (mon.0) 1089 : audit [DBG] from='client.? 192.168.123.106:0/1720318683' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:56.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:55 vm09 bash[34466]: audit 2026-04-15T13:41:55.143136+0000 mon.vm06 (mon.0) 1089 : audit [DBG] from='client.? 
192.168.123.106:0/1720318683' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:41:58.265 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:57 vm06 bash[28114]: cluster 2026-04-15T13:41:56.896333+0000 mgr.vm06.qbbldl (mgr.14229) 496 : cluster [DBG] pgmap v264: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:41:58.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:41:57 vm06 bash[28114]: cluster 2026-04-15T13:41:56.896333+0000 mgr.vm06.qbbldl (mgr.14229) 496 : cluster [DBG] pgmap v264: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:41:58.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:57 vm09 bash[34466]: cluster 2026-04-15T13:41:56.896333+0000 mgr.vm06.qbbldl (mgr.14229) 496 : cluster [DBG] pgmap v264: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:41:58.358 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:41:57 vm09 bash[34466]: cluster 2026-04-15T13:41:56.896333+0000 mgr.vm06.qbbldl (mgr.14229) 496 : cluster [DBG] pgmap v264: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:00.370 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop 2026-04-15T13:42:00.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:00 vm06 bash[28114]: cluster 2026-04-15T13:41:58.896787+0000 mgr.vm06.qbbldl (mgr.14229) 497 : cluster [DBG] pgmap v265: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:00.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:00 vm06 bash[28114]: cluster 2026-04-15T13:41:58.896787+0000 mgr.vm06.qbbldl (mgr.14229) 497 : cluster [DBG] pgmap v265: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:00.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:00 vm06 bash[28114]: audit 2026-04-15T13:41:59.253137+0000 mon.vm06 (mon.0) 1090 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:42:00.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:00 vm06 bash[28114]: audit 2026-04-15T13:41:59.253137+0000 mon.vm06 (mon.0) 1090 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:42:00.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:00 vm06 bash[28114]: audit 2026-04-15T13:41:59.258152+0000 mon.vm06 (mon.0) 1091 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:42:00.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:00 vm06 bash[28114]: audit 2026-04-15T13:41:59.258152+0000 mon.vm06 (mon.0) 1091 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:42:00.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:00 vm06 bash[28114]: audit 2026-04-15T13:41:59.259131+0000 mon.vm06 (mon.0) 1092 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:42:00.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:00 vm06 bash[28114]: audit 2026-04-15T13:41:59.259131+0000 mon.vm06 (mon.0) 1092 : audit [DBG] from='mgr.14229 
192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:42:00.570 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:42:00.571 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (5m) 17s ago 6m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:42:00.571 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (23s) 17s ago 6m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:42:00.571 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (6m) 2m ago 6m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c175413c88f3 2026-04-15T13:42:00.571 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (6m) 2m ago 6m 111M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:42:00.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:00 vm09 bash[34466]: cluster 2026-04-15T13:41:58.896787+0000 mgr.vm06.qbbldl (mgr.14229) 497 : cluster [DBG] pgmap v265: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:00.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:00 vm09 bash[34466]: cluster 2026-04-15T13:41:58.896787+0000 mgr.vm06.qbbldl (mgr.14229) 497 : cluster [DBG] pgmap v265: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:00.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:00 vm09 bash[34466]: audit 2026-04-15T13:41:59.253137+0000 mon.vm06 (mon.0) 1090 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:42:00.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:00 vm09 bash[34466]: audit 2026-04-15T13:41:59.253137+0000 mon.vm06 (mon.0) 1090 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:42:00.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:00 vm09 bash[34466]: audit 2026-04-15T13:41:59.258152+0000 mon.vm06 (mon.0) 1091 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:42:00.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:00 vm09 bash[34466]: audit 2026-04-15T13:41:59.258152+0000 mon.vm06 (mon.0) 1091 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:42:00.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:00 vm09 bash[34466]: audit 2026-04-15T13:41:59.259131+0000 mon.vm06 (mon.0) 1092 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:42:00.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:00 vm09 bash[34466]: audit 2026-04-15T13:41:59.259131+0000 mon.vm06 (mon.0) 1092 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:42:00.833 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK 2026-04-15T13:42:01.515 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:01 vm06 bash[28114]: audit 2026-04-15T13:42:00.348394+0000 mgr.vm06.qbbldl (mgr.14229) 498 : audit [DBG] from='client.15590 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 
2026-04-15T13:42:01.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:01 vm06 bash[28114]: audit 2026-04-15T13:42:00.348394+0000 mgr.vm06.qbbldl (mgr.14229) 498 : audit [DBG] from='client.15590 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:01.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:01 vm06 bash[28114]: audit 2026-04-15T13:42:00.830201+0000 mon.vm06 (mon.0) 1093 : audit [DBG] from='client.? 192.168.123.106:0/1283055582' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:42:01.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:01 vm06 bash[28114]: audit 2026-04-15T13:42:00.830201+0000 mon.vm06 (mon.0) 1093 : audit [DBG] from='client.? 192.168.123.106:0/1283055582' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:42:01.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:01 vm09 bash[34466]: audit 2026-04-15T13:42:00.348394+0000 mgr.vm06.qbbldl (mgr.14229) 498 : audit [DBG] from='client.15590 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:01.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:01 vm09 bash[34466]: audit 2026-04-15T13:42:00.348394+0000 mgr.vm06.qbbldl (mgr.14229) 498 : audit [DBG] from='client.15590 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:01.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:01 vm09 bash[34466]: audit 2026-04-15T13:42:00.830201+0000 mon.vm06 (mon.0) 1093 : audit [DBG] from='client.? 192.168.123.106:0/1283055582' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:42:01.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:01 vm09 bash[34466]: audit 2026-04-15T13:42:00.830201+0000 mon.vm06 (mon.0) 1093 : audit [DBG] from='client.? 
192.168.123.106:0/1283055582' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:42:02.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:02 vm09 bash[34466]: audit 2026-04-15T13:42:00.564992+0000 mgr.vm06.qbbldl (mgr.14229) 499 : audit [DBG] from='client.15594 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:02.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:02 vm09 bash[34466]: audit 2026-04-15T13:42:00.564992+0000 mgr.vm06.qbbldl (mgr.14229) 499 : audit [DBG] from='client.15594 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:02.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:02 vm09 bash[34466]: cluster 2026-04-15T13:42:00.897276+0000 mgr.vm06.qbbldl (mgr.14229) 500 : cluster [DBG] pgmap v266: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:02.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:02 vm09 bash[34466]: cluster 2026-04-15T13:42:00.897276+0000 mgr.vm06.qbbldl (mgr.14229) 500 : cluster [DBG] pgmap v266: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:02.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:02 vm06 bash[28114]: audit 2026-04-15T13:42:00.564992+0000 mgr.vm06.qbbldl (mgr.14229) 499 : audit [DBG] from='client.15594 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:02.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:02 vm06 bash[28114]: audit 2026-04-15T13:42:00.564992+0000 mgr.vm06.qbbldl (mgr.14229) 499 : audit [DBG] from='client.15594 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:02.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:02 vm06 bash[28114]: cluster 2026-04-15T13:42:00.897276+0000 mgr.vm06.qbbldl (mgr.14229) 500 : cluster [DBG] pgmap v266: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:02.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:02 vm06 bash[28114]: cluster 2026-04-15T13:42:00.897276+0000 mgr.vm06.qbbldl (mgr.14229) 500 : cluster [DBG] pgmap v266: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:04 vm09 bash[34466]: cluster 2026-04-15T13:42:02.897743+0000 mgr.vm06.qbbldl (mgr.14229) 501 : cluster [DBG] pgmap v267: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:04 vm09 bash[34466]: cluster 2026-04-15T13:42:02.897743+0000 mgr.vm06.qbbldl (mgr.14229) 501 : cluster [DBG] pgmap v267: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:04 vm09 bash[34466]: audit 2026-04-15T13:42:04.140850+0000 mon.vm06 (mon.0) 1094 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:42:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 
13:42:04 vm09 bash[34466]: audit 2026-04-15T13:42:04.140850+0000 mon.vm06 (mon.0) 1094 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:42:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:04 vm09 bash[34466]: audit 2026-04-15T13:42:04.145571+0000 mon.vm06 (mon.0) 1095 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:42:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:04 vm09 bash[34466]: audit 2026-04-15T13:42:04.145571+0000 mon.vm06 (mon.0) 1095 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:42:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:04 vm09 bash[34466]: audit 2026-04-15T13:42:04.146484+0000 mon.vm06 (mon.0) 1096 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:42:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:04 vm09 bash[34466]: audit 2026-04-15T13:42:04.146484+0000 mon.vm06 (mon.0) 1096 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:42:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:04 vm09 bash[34466]: audit 2026-04-15T13:42:04.147184+0000 mon.vm06 (mon.0) 1097 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:42:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:04 vm09 bash[34466]: audit 2026-04-15T13:42:04.147184+0000 mon.vm06 (mon.0) 1097 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:42:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:04 vm09 bash[34466]: audit 2026-04-15T13:42:04.150980+0000 mon.vm06 (mon.0) 1098 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:42:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:04 vm09 bash[34466]: audit 2026-04-15T13:42:04.150980+0000 mon.vm06 (mon.0) 1098 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:42:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:04 vm09 bash[34466]: audit 2026-04-15T13:42:04.152515+0000 mon.vm06 (mon.0) 1099 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:42:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:04 vm09 bash[34466]: audit 2026-04-15T13:42:04.152515+0000 mon.vm06 (mon.0) 1099 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:42:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:04 vm06 bash[28114]: cluster 2026-04-15T13:42:02.897743+0000 mgr.vm06.qbbldl (mgr.14229) 501 : cluster [DBG] pgmap v267: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:04 vm06 bash[28114]: cluster 2026-04-15T13:42:02.897743+0000 mgr.vm06.qbbldl (mgr.14229) 501 : cluster [DBG] pgmap v267: 129 pgs: 129 
active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:04 vm06 bash[28114]: audit 2026-04-15T13:42:04.140850+0000 mon.vm06 (mon.0) 1094 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:42:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:04 vm06 bash[28114]: audit 2026-04-15T13:42:04.140850+0000 mon.vm06 (mon.0) 1094 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:42:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:04 vm06 bash[28114]: audit 2026-04-15T13:42:04.145571+0000 mon.vm06 (mon.0) 1095 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:42:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:04 vm06 bash[28114]: audit 2026-04-15T13:42:04.145571+0000 mon.vm06 (mon.0) 1095 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:42:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:04 vm06 bash[28114]: audit 2026-04-15T13:42:04.146484+0000 mon.vm06 (mon.0) 1096 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:42:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:04 vm06 bash[28114]: audit 2026-04-15T13:42:04.146484+0000 mon.vm06 (mon.0) 1096 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:42:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:04 vm06 bash[28114]: audit 2026-04-15T13:42:04.147184+0000 mon.vm06 (mon.0) 1097 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:42:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:04 vm06 bash[28114]: audit 2026-04-15T13:42:04.147184+0000 mon.vm06 (mon.0) 1097 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:42:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:04 vm06 bash[28114]: audit 2026-04-15T13:42:04.150980+0000 mon.vm06 (mon.0) 1098 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:42:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:04 vm06 bash[28114]: audit 2026-04-15T13:42:04.150980+0000 mon.vm06 (mon.0) 1098 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:42:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:04 vm06 bash[28114]: audit 2026-04-15T13:42:04.152515+0000 mon.vm06 (mon.0) 1099 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:42:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:04 vm06 bash[28114]: audit 2026-04-15T13:42:04.152515+0000 mon.vm06 (mon.0) 1099 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:42:05.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:05 vm09 bash[34466]: cluster 
2026-04-15T13:42:04.148376+0000 mgr.vm06.qbbldl (mgr.14229) 502 : cluster [DBG] pgmap v268: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 181 B/s rd, 363 B/s wr, 0 op/s
2026-04-15T13:42:05.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:05 vm09 bash[34466]: cluster 2026-04-15T13:42:05.144672+0000 mon.vm06 (mon.0) 1100 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)
2026-04-15T13:42:05.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:05 vm06 bash[28114]: cluster 2026-04-15T13:42:04.148376+0000 mgr.vm06.qbbldl (mgr.14229) 502 : cluster [DBG] pgmap v268: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 181 B/s rd, 363 B/s wr, 0 op/s
2026-04-15T13:42:05.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:05 vm06 bash[28114]: cluster 2026-04-15T13:42:05.144672+0000 mon.vm06 (mon.0) 1100 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)
2026-04-15T13:42:06.055 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:42:06.241 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:42:06.241 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (5m) 23s ago 6m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:42:06.241 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (28s) 23s ago 6m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:42:06.241 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 2s ago 6m - -
2026-04-15T13:42:06.242 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (6m) 2s ago 6m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:42:06.492 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:42:06.492 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:42:06.492 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:42:07.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:07 vm09 bash[34466]: audit 2026-04-15T13:42:06.034454+0000 mgr.vm06.qbbldl (mgr.14229) 503 : audit [DBG] from='client.15602 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:07.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:07 vm09 bash[34466]: cluster 2026-04-15T13:42:06.148793+0000 mgr.vm06.qbbldl (mgr.14229) 504 : cluster [DBG] pgmap v269: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:42:07.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:07 vm09 bash[34466]: audit 2026-04-15T13:42:06.235955+0000 mgr.vm06.qbbldl (mgr.14229) 505 : audit [DBG] from='client.15606 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:07.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:07 vm09 bash[34466]: audit 2026-04-15T13:42:06.489031+0000 mon.vm06 (mon.0) 1101 : audit [DBG] from='client.? 192.168.123.106:0/3004143195' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:42:07.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:07 vm06 bash[28114]: audit 2026-04-15T13:42:06.034454+0000 mgr.vm06.qbbldl (mgr.14229) 503 : audit [DBG] from='client.15602 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:07.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:07 vm06 bash[28114]: cluster 2026-04-15T13:42:06.148793+0000 mgr.vm06.qbbldl (mgr.14229) 504 : cluster [DBG] pgmap v269: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:42:07.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:07 vm06 bash[28114]: audit 2026-04-15T13:42:06.235955+0000 mgr.vm06.qbbldl (mgr.14229) 505 : audit [DBG] from='client.15606 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:07.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:07 vm06 bash[28114]: audit 2026-04-15T13:42:06.489031+0000 mon.vm06 (mon.0) 1101 : audit [DBG] from='client.? 192.168.123.106:0/3004143195' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:42:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:09 vm09 bash[34466]: cluster 2026-04-15T13:42:08.149348+0000 mgr.vm06.qbbldl (mgr.14229) 506 : cluster [DBG] pgmap v270: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:42:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:09 vm09 bash[34466]: audit 2026-04-15T13:42:08.493099+0000 mon.vm06 (mon.0) 1102 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:42:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:09 vm09 bash[34466]: audit 2026-04-15T13:42:08.493654+0000 mon.vm06 (mon.0) 1103 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:42:09.765 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:09 vm06 bash[28114]: cluster 2026-04-15T13:42:08.149348+0000 mgr.vm06.qbbldl (mgr.14229) 506 : cluster [DBG] pgmap v270: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:42:09.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:09 vm06 bash[28114]: audit 2026-04-15T13:42:08.493099+0000 mon.vm06 (mon.0) 1102 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:42:09.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:09 vm06 bash[28114]: audit 2026-04-15T13:42:08.493654+0000 mon.vm06 (mon.0) 1103 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:42:11.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:11 vm09 bash[34466]: cluster 2026-04-15T13:42:10.149832+0000 mgr.vm06.qbbldl (mgr.14229) 507 : cluster [DBG] pgmap v271: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 181 B/s rd, 363 B/s wr, 0 op/s
2026-04-15T13:42:11.697 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:42:11.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:11 vm06 bash[28114]: cluster 2026-04-15T13:42:10.149832+0000 mgr.vm06.qbbldl (mgr.14229) 507 : cluster [DBG] pgmap v271: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 181 B/s rd, 363 B/s wr, 0 op/s
2026-04-15T13:42:11.886 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:42:11.886 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (5m) 29s ago 6m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:42:11.886 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (34s) 29s ago 6m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:42:11.886 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 7s ago 6m - -
2026-04-15T13:42:11.886 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (6m) 7s ago 6m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:42:12.126 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:42:12.126 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:42:12.126 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:42:12.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:12 vm09 bash[34466]: audit 2026-04-15T13:42:12.123274+0000 mon.vm06 (mon.0) 1104 : audit [DBG] from='client.? 192.168.123.106:0/3709710092' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:42:12.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:12 vm06 bash[28114]: audit 2026-04-15T13:42:12.123274+0000 mon.vm06 (mon.0) 1104 : audit [DBG] from='client.? 192.168.123.106:0/3709710092' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:42:13.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:13 vm09 bash[34466]: audit 2026-04-15T13:42:11.677827+0000 mgr.vm06.qbbldl (mgr.14229) 508 : audit [DBG] from='client.15614 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:13.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:13 vm09 bash[34466]: audit 2026-04-15T13:42:11.880815+0000 mgr.vm06.qbbldl (mgr.14229) 509 : audit [DBG] from='client.15618 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:13.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:13 vm09 bash[34466]: cluster 2026-04-15T13:42:12.150217+0000 mgr.vm06.qbbldl (mgr.14229) 510 : cluster [DBG] pgmap v272: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 181 B/s rd, 363 B/s wr, 0 op/s
2026-04-15T13:42:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:13 vm06 bash[28114]: audit 2026-04-15T13:42:11.677827+0000 mgr.vm06.qbbldl (mgr.14229) 508 : audit [DBG] from='client.15614 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:13 vm06 bash[28114]: audit 2026-04-15T13:42:11.880815+0000 mgr.vm06.qbbldl (mgr.14229) 509 : audit [DBG] from='client.15618 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:13 vm06 bash[28114]: cluster 2026-04-15T13:42:12.150217+0000 mgr.vm06.qbbldl (mgr.14229) 510 : cluster [DBG] pgmap v272: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 181 B/s rd, 363 B/s wr, 0 op/s
2026-04-15T13:42:15.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:15 vm09 bash[34466]: cluster 2026-04-15T13:42:14.150637+0000 mgr.vm06.qbbldl (mgr.14229) 511 : cluster [DBG] pgmap v273: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 181 B/s rd, 363 B/s wr, 0 op/s
2026-04-15T13:42:15.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:15 vm06 bash[28114]: cluster 2026-04-15T13:42:14.150637+0000 mgr.vm06.qbbldl (mgr.14229) 511 : cluster [DBG] pgmap v273: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 181 B/s rd, 363 B/s wr, 0 op/s
2026-04-15T13:42:17.336 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:42:17.531 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:42:17.531 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (5m) 34s ago 6m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:42:17.532 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (40s) 34s ago 6m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:42:17.532 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 13s ago 6m - -
2026-04-15T13:42:17.532 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (6m) 13s ago 6m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:42:17.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:17 vm09 bash[34466]: cluster 2026-04-15T13:42:16.151121+0000 mgr.vm06.qbbldl (mgr.14229) 512 : cluster [DBG] pgmap v274: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:42:17.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:17 vm06 bash[28114]: cluster 2026-04-15T13:42:16.151121+0000 mgr.vm06.qbbldl (mgr.14229) 512 : cluster [DBG] pgmap v274: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:42:17.792 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:42:17.792 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:42:17.792 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:42:18.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:18 vm09 bash[34466]: audit 2026-04-15T13:42:17.316012+0000 mgr.vm06.qbbldl (mgr.14229) 513 : audit [DBG] from='client.15626 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:18.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:18 vm09 bash[34466]: audit 2026-04-15T13:42:17.789640+0000 mon.vm06 (mon.0) 1105 : audit [DBG] from='client.? 192.168.123.106:0/1945864901' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:42:18.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:18 vm06 bash[28114]: audit 2026-04-15T13:42:17.316012+0000 mgr.vm06.qbbldl (mgr.14229) 513 : audit [DBG] from='client.15626 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:18.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:18 vm06 bash[28114]: audit 2026-04-15T13:42:17.789640+0000 mon.vm06 (mon.0) 1105 : audit [DBG] from='client.? 192.168.123.106:0/1945864901' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:42:19.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:19 vm09 bash[34466]: audit 2026-04-15T13:42:17.525894+0000 mgr.vm06.qbbldl (mgr.14229) 514 : audit [DBG] from='client.15630 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:19.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:19 vm09 bash[34466]: cluster 2026-04-15T13:42:18.151614+0000 mgr.vm06.qbbldl (mgr.14229) 515 : cluster [DBG] pgmap v275: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:42:19.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:19 vm06 bash[28114]: audit 2026-04-15T13:42:17.525894+0000 mgr.vm06.qbbldl (mgr.14229) 514 : audit [DBG] from='client.15630 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:19.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:19 vm06 bash[28114]: cluster 2026-04-15T13:42:18.151614+0000 mgr.vm06.qbbldl (mgr.14229) 515 : cluster [DBG] pgmap v275: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:42:21.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:21 vm09 bash[34466]: cluster 2026-04-15T13:42:20.152070+0000 mgr.vm06.qbbldl (mgr.14229) 516 : cluster [DBG] pgmap v276: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:42:21.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:21 vm06 bash[28114]: cluster 2026-04-15T13:42:20.152070+0000 mgr.vm06.qbbldl (mgr.14229) 516 : cluster [DBG] pgmap v276: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:42:23.007 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:42:23.196 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:42:23.196 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (6m) 40s ago 6m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:42:23.196 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (45s) 40s ago 6m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:42:23.196 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 19s ago 6m - -
2026-04-15T13:42:23.196 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (6m) 19s ago 6m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:42:23.446 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:42:23.446 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:42:23.446 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:42:23.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:23 vm06 bash[28114]: cluster 2026-04-15T13:42:22.152524+0000 mgr.vm06.qbbldl (mgr.14229) 517 : cluster [DBG] pgmap v277: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:42:23.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:23 vm09 bash[34466]: cluster 2026-04-15T13:42:22.152524+0000 mgr.vm06.qbbldl (mgr.14229) 517 : cluster [DBG] pgmap v277: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:42:24.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:24 vm06 bash[28114]: audit 2026-04-15T13:42:22.986079+0000 mgr.vm06.qbbldl (mgr.14229) 518 : audit [DBG] from='client.15638 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:24.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:24 vm06 bash[28114]: audit 2026-04-15T13:42:23.189906+0000 mgr.vm06.qbbldl (mgr.14229) 519 : audit [DBG] from='client.15642 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:24.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:24 vm06 bash[28114]: audit 2026-04-15T13:42:23.443065+0000 mon.vm06 (mon.0) 1106 : audit [DBG] from='client.? 192.168.123.106:0/2634698656' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:42:24.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:24 vm06 bash[28114]: audit 2026-04-15T13:42:23.488086+0000 mon.vm06 (mon.0) 1107 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:42:24.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:24 vm09 bash[34466]: audit 2026-04-15T13:42:22.986079+0000 mgr.vm06.qbbldl (mgr.14229) 518 : audit [DBG] from='client.15638 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:24.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:24 vm09 bash[34466]: audit 2026-04-15T13:42:23.189906+0000 mgr.vm06.qbbldl (mgr.14229) 519 : audit [DBG] from='client.15642 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:24.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:24 vm09 bash[34466]: audit 2026-04-15T13:42:23.443065+0000 mon.vm06 (mon.0) 1106 : audit [DBG] from='client.? 192.168.123.106:0/2634698656' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:42:24.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:24 vm09 bash[34466]: audit 2026-04-15T13:42:23.488086+0000 mon.vm06 (mon.0) 1107 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:42:25.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:25 vm06 bash[28114]: cluster 2026-04-15T13:42:24.153129+0000 mgr.vm06.qbbldl (mgr.14229) 520 : cluster [DBG] pgmap v278: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:42:25.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:25 vm09 bash[34466]: cluster 2026-04-15T13:42:24.153129+0000 mgr.vm06.qbbldl (mgr.14229) 520 : cluster [DBG] pgmap v278: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:42:27.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:27 vm06 bash[28114]: cluster 2026-04-15T13:42:26.153597+0000 mgr.vm06.qbbldl (mgr.14229) 521 : cluster [DBG] pgmap v279: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:42:27.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:27 vm09 bash[34466]: cluster 2026-04-15T13:42:26.153597+0000 mgr.vm06.qbbldl (mgr.14229) 521 : cluster [DBG] pgmap v279: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:42:28.677 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:42:28.878 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:42:28.878 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (6m) 45s ago 6m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:42:28.878 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (51s) 45s ago 7m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:42:28.878 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 24s ago 7m - -
2026-04-15T13:42:28.878 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (7m) 24s ago 7m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:42:29.117 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:42:29.118 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:42:29.118 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:42:29.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:29 vm06 bash[28114]: cluster 2026-04-15T13:42:28.154114+0000 mgr.vm06.qbbldl (mgr.14229) 522 : cluster [DBG] pgmap v280: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:42:29.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:29 vm06 bash[28114]: audit 2026-04-15T13:42:29.114604+0000 mon.vm06 (mon.0) 1108 : audit [DBG] from='client.? 192.168.123.106:0/319687579' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:42:29.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:29 vm09 bash[34466]: cluster 2026-04-15T13:42:28.154114+0000 mgr.vm06.qbbldl (mgr.14229) 522 : cluster [DBG] pgmap v280: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:42:29.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:29 vm09 bash[34466]: audit 2026-04-15T13:42:29.114604+0000 mon.vm06 (mon.0) 1108 : audit [DBG] from='client.? 192.168.123.106:0/319687579' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:42:30.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:30 vm06 bash[28114]: audit 2026-04-15T13:42:28.656659+0000 mgr.vm06.qbbldl (mgr.14229) 523 : audit [DBG] from='client.15650 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:30.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:30 vm06 bash[28114]: audit 2026-04-15T13:42:28.871804+0000 mgr.vm06.qbbldl (mgr.14229) 524 : audit [DBG] from='client.15654 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:30.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:30 vm09 bash[34466]: audit 2026-04-15T13:42:28.656659+0000 mgr.vm06.qbbldl (mgr.14229) 523 : audit [DBG] from='client.15650 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:30.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:30 vm09 bash[34466]: audit 2026-04-15T13:42:28.871804+0000 mgr.vm06.qbbldl (mgr.14229) 524 : audit [DBG] from='client.15654 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:31.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:31 vm06 bash[28114]: cluster 2026-04-15T13:42:30.154541+0000 mgr.vm06.qbbldl (mgr.14229) 525 : cluster [DBG] pgmap v281: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:42:31.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:31 vm09 bash[34466]: cluster 2026-04-15T13:42:30.154541+0000 mgr.vm06.qbbldl (mgr.14229) 525 : cluster [DBG] pgmap v281: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:42:33.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:33 vm06 bash[28114]: cluster 2026-04-15T13:42:32.155024+0000 mgr.vm06.qbbldl (mgr.14229) 526 : cluster [DBG] pgmap v282: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:42:33.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:33 vm09 bash[34466]: cluster 2026-04-15T13:42:32.155024+0000 mgr.vm06.qbbldl (mgr.14229) 526 : cluster [DBG] pgmap v282: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:42:34.343 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:42:34.540 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:42:34.540 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (6m) 51s ago 7m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:42:34.540 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (57s) 51s ago 7m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:42:34.540 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 30s ago 7m - -
2026-04-15T13:42:34.540 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (7m) 30s ago 7m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:42:34.797 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:42:34.797 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:42:34.797 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:42:35.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:35 vm06 bash[28114]: cluster 2026-04-15T13:42:34.155531+0000 mgr.vm06.qbbldl (mgr.14229) 527 : cluster [DBG] pgmap v283: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:42:35.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:35 vm06 bash[28114]: audit 2026-04-15T13:42:34.323031+0000 mgr.vm06.qbbldl (mgr.14229) 528 : audit [DBG] from='client.15662 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:35.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:35 vm06 bash[28114]: audit 2026-04-15T13:42:34.794016+0000 mon.vm06 (mon.0) 1109 : audit [DBG] from='client.? 192.168.123.106:0/1469327692' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:42:35.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:35 vm09 bash[34466]: cluster 2026-04-15T13:42:34.155531+0000 mgr.vm06.qbbldl (mgr.14229) 527 : cluster [DBG] pgmap v283: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:42:35.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:35 vm09 bash[34466]: audit 2026-04-15T13:42:34.323031+0000 mgr.vm06.qbbldl (mgr.14229) 528 : audit [DBG] from='client.15662 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:35.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:35 vm09 bash[34466]: audit 2026-04-15T13:42:34.794016+0000 mon.vm06 (mon.0) 1109 : audit [DBG] from='client.? 192.168.123.106:0/1469327692' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:42:36.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:36 vm06 bash[28114]: audit 2026-04-15T13:42:34.534219+0000 mgr.vm06.qbbldl (mgr.14229) 529 : audit [DBG] from='client.15666 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:36.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:36 vm09 bash[34466]: audit 2026-04-15T13:42:34.534219+0000 mgr.vm06.qbbldl (mgr.14229) 529 : audit [DBG] from='client.15666 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:37.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:37 vm06 bash[28114]: cluster 2026-04-15T13:42:36.156088+0000 mgr.vm06.qbbldl (mgr.14229) 530 : cluster [DBG] pgmap v284: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:42:37.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:37 vm09 bash[34466]: cluster 2026-04-15T13:42:36.156088+0000 mgr.vm06.qbbldl (mgr.14229) 530 : cluster [DBG] pgmap v284: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:42:38.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:38 vm06 bash[28114]: cluster 2026-04-15T13:42:38.156525+0000 mgr.vm06.qbbldl (mgr.14229) 531 : cluster [DBG] pgmap v285: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:42:38.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:38 vm09 bash[34466]: cluster 2026-04-15T13:42:38.156525+0000 mgr.vm06.qbbldl (mgr.14229) 531 : cluster [DBG] pgmap v285: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:42:39.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:39 vm06 bash[28114]: audit 2026-04-15T13:42:38.488417+0000 mon.vm06 (mon.0) 1110 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:42:39.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:39 vm09 bash[34466]: audit 2026-04-15T13:42:38.488417+0000 mon.vm06 (mon.0) 1110 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:42:40.025 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:42:40.223 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:42:40.223 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (6m) 57s ago 7m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:42:40.223 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (62s) 57s ago 7m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:42:40.223 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 36s ago 7m - -
2026-04-15T13:42:40.223 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (7m) 36s ago 7m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:42:40.482 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:42:40.482 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:42:40.482 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:42:40.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:40 vm06 bash[28114]: audit 2026-04-15T13:42:39.997522+0000 mgr.vm06.qbbldl (mgr.14229) 532 : audit [DBG] from='client.15674 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:40.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:40 vm06 bash[28114]: cluster 2026-04-15T13:42:40.157076+0000 mgr.vm06.qbbldl (mgr.14229) 533 : cluster [DBG] pgmap v286: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:42:40.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:40 vm06 bash[28114]: audit 2026-04-15T13:42:40.217463+0000 mgr.vm06.qbbldl (mgr.14229) 534 : audit [DBG] from='client.15678 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:40.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:40 vm09 bash[34466]: audit 2026-04-15T13:42:39.997522+0000 mgr.vm06.qbbldl (mgr.14229) 532 : audit [DBG] from='client.15674 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:40.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:40 vm09 bash[34466]: cluster 2026-04-15T13:42:40.157076+0000 mgr.vm06.qbbldl (mgr.14229) 533 : cluster [DBG] pgmap v286: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:42:40.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:40 vm09 bash[34466]: audit 2026-04-15T13:42:40.217463+0000 mgr.vm06.qbbldl (mgr.14229) 534 : audit [DBG] from='client.15678 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:42:41.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:41 vm06 bash[28114]: audit 2026-04-15T13:42:40.479499+0000 mon.vm06 (mon.0) 1111 : audit [DBG] from='client.? 192.168.123.106:0/4267717918' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:42:41.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:41 vm09 bash[34466]: audit 2026-04-15T13:42:40.479499+0000 mon.vm06 (mon.0) 1111 : audit [DBG] from='client.? 192.168.123.106:0/4267717918' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:42:42.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:42 vm06 bash[28114]: cluster 2026-04-15T13:42:42.157523+0000 mgr.vm06.qbbldl (mgr.14229) 535 : cluster [DBG] pgmap v287: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:42:42.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:42 vm09 bash[34466]: cluster 2026-04-15T13:42:42.157523+0000 mgr.vm06.qbbldl (mgr.14229) 535 : cluster [DBG] pgmap v287: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:42:45.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:45 vm06 bash[28114]: cluster 2026-04-15T13:42:44.158121+0000 mgr.vm06.qbbldl (mgr.14229) 536 : cluster [DBG] pgmap v288: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:42:45.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:45 vm09 bash[34466]: cluster 2026-04-15T13:42:44.158121+0000 mgr.vm06.qbbldl (mgr.14229) 536 : cluster [DBG] pgmap v288: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:42:45.731 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:42:45.942 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:42:45.942 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (6m) 63s ago 7m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:42:45.942 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (68s) 63s ago 7m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:42:45.942 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 41s ago 7m - -
INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 41s ago 7m - - 2026-04-15T13:42:45.942 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (7m) 41s ago 7m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:42:46.195 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:42:46.195 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:42:46.195 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state 2026-04-15T13:42:46.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:46 vm06 bash[28114]: audit 2026-04-15T13:42:46.192027+0000 mon.vm06 (mon.0) 1112 : audit [DBG] from='client.? 192.168.123.106:0/3651721259' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:42:46.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:46 vm06 bash[28114]: audit 2026-04-15T13:42:46.192027+0000 mon.vm06 (mon.0) 1112 : audit [DBG] from='client.? 192.168.123.106:0/3651721259' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:42:46.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:46 vm09 bash[34466]: audit 2026-04-15T13:42:46.192027+0000 mon.vm06 (mon.0) 1112 : audit [DBG] from='client.? 192.168.123.106:0/3651721259' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:42:46.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:46 vm09 bash[34466]: audit 2026-04-15T13:42:46.192027+0000 mon.vm06 (mon.0) 1112 : audit [DBG] from='client.? 192.168.123.106:0/3651721259' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:42:47.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:47 vm06 bash[28114]: audit 2026-04-15T13:42:45.708536+0000 mgr.vm06.qbbldl (mgr.14229) 537 : audit [DBG] from='client.15686 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:47.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:47 vm06 bash[28114]: audit 2026-04-15T13:42:45.708536+0000 mgr.vm06.qbbldl (mgr.14229) 537 : audit [DBG] from='client.15686 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:47.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:47 vm06 bash[28114]: audit 2026-04-15T13:42:45.934544+0000 mgr.vm06.qbbldl (mgr.14229) 538 : audit [DBG] from='client.15690 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:47.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:47 vm06 bash[28114]: audit 2026-04-15T13:42:45.934544+0000 mgr.vm06.qbbldl (mgr.14229) 538 : audit [DBG] from='client.15690 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:47.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:47 vm06 bash[28114]: cluster 2026-04-15T13:42:46.158613+0000 mgr.vm06.qbbldl (mgr.14229) 539 : cluster [DBG] pgmap v289: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:47.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:47 vm06 bash[28114]: cluster 2026-04-15T13:42:46.158613+0000 mgr.vm06.qbbldl (mgr.14229) 539 : cluster [DBG] pgmap v289: 129 pgs: 129 
active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:47.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:47 vm09 bash[34466]: audit 2026-04-15T13:42:45.708536+0000 mgr.vm06.qbbldl (mgr.14229) 537 : audit [DBG] from='client.15686 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:47.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:47 vm09 bash[34466]: audit 2026-04-15T13:42:45.708536+0000 mgr.vm06.qbbldl (mgr.14229) 537 : audit [DBG] from='client.15686 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:47.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:47 vm09 bash[34466]: audit 2026-04-15T13:42:45.934544+0000 mgr.vm06.qbbldl (mgr.14229) 538 : audit [DBG] from='client.15690 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:47.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:47 vm09 bash[34466]: audit 2026-04-15T13:42:45.934544+0000 mgr.vm06.qbbldl (mgr.14229) 538 : audit [DBG] from='client.15690 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:47.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:47 vm09 bash[34466]: cluster 2026-04-15T13:42:46.158613+0000 mgr.vm06.qbbldl (mgr.14229) 539 : cluster [DBG] pgmap v289: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:47.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:47 vm09 bash[34466]: cluster 2026-04-15T13:42:46.158613+0000 mgr.vm06.qbbldl (mgr.14229) 539 : cluster [DBG] pgmap v289: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:49.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:49 vm06 bash[28114]: cluster 2026-04-15T13:42:48.159235+0000 mgr.vm06.qbbldl (mgr.14229) 540 : cluster [DBG] pgmap v290: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:49.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:49 vm06 bash[28114]: cluster 2026-04-15T13:42:48.159235+0000 mgr.vm06.qbbldl (mgr.14229) 540 : cluster [DBG] pgmap v290: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:49.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:49 vm09 bash[34466]: cluster 2026-04-15T13:42:48.159235+0000 mgr.vm06.qbbldl (mgr.14229) 540 : cluster [DBG] pgmap v290: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:49.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:49 vm09 bash[34466]: cluster 2026-04-15T13:42:48.159235+0000 mgr.vm06.qbbldl (mgr.14229) 540 : cluster [DBG] pgmap v290: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:51.415 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop 2026-04-15T13:42:51.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:51 vm06 bash[28114]: cluster 2026-04-15T13:42:50.159655+0000 mgr.vm06.qbbldl (mgr.14229) 541 : cluster [DBG] pgmap v291: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 
GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:51.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:51 vm06 bash[28114]: cluster 2026-04-15T13:42:50.159655+0000 mgr.vm06.qbbldl (mgr.14229) 541 : cluster [DBG] pgmap v291: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:51.605 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:42:51.605 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (6m) 68s ago 7m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:42:51.605 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (74s) 68s ago 7m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:42:51.605 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 47s ago 7m - - 2026-04-15T13:42:51.605 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (7m) 47s ago 7m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:42:51.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:51 vm09 bash[34466]: cluster 2026-04-15T13:42:50.159655+0000 mgr.vm06.qbbldl (mgr.14229) 541 : cluster [DBG] pgmap v291: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:51.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:51 vm09 bash[34466]: cluster 2026-04-15T13:42:50.159655+0000 mgr.vm06.qbbldl (mgr.14229) 541 : cluster [DBG] pgmap v291: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:42:51.856 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:42:51.856 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:42:51.856 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state 2026-04-15T13:42:52.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:52 vm06 bash[28114]: audit 2026-04-15T13:42:51.393183+0000 mgr.vm06.qbbldl (mgr.14229) 542 : audit [DBG] from='client.15698 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:52.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:52 vm06 bash[28114]: audit 2026-04-15T13:42:51.393183+0000 mgr.vm06.qbbldl (mgr.14229) 542 : audit [DBG] from='client.15698 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:52.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:52 vm06 bash[28114]: audit 2026-04-15T13:42:51.853247+0000 mon.vm06 (mon.0) 1113 : audit [DBG] from='client.? 192.168.123.106:0/1787409228' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:42:52.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:52 vm06 bash[28114]: audit 2026-04-15T13:42:51.853247+0000 mon.vm06 (mon.0) 1113 : audit [DBG] from='client.? 
192.168.123.106:0/1787409228' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:42:52.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:52 vm09 bash[34466]: audit 2026-04-15T13:42:51.393183+0000 mgr.vm06.qbbldl (mgr.14229) 542 : audit [DBG] from='client.15698 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:52.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:52 vm09 bash[34466]: audit 2026-04-15T13:42:51.393183+0000 mgr.vm06.qbbldl (mgr.14229) 542 : audit [DBG] from='client.15698 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:52.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:52 vm09 bash[34466]: audit 2026-04-15T13:42:51.853247+0000 mon.vm06 (mon.0) 1113 : audit [DBG] from='client.? 192.168.123.106:0/1787409228' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:42:52.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:52 vm09 bash[34466]: audit 2026-04-15T13:42:51.853247+0000 mon.vm06 (mon.0) 1113 : audit [DBG] from='client.? 192.168.123.106:0/1787409228' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:42:53.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:53 vm06 bash[28114]: audit 2026-04-15T13:42:51.599078+0000 mgr.vm06.qbbldl (mgr.14229) 543 : audit [DBG] from='client.15702 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:53.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:53 vm06 bash[28114]: audit 2026-04-15T13:42:51.599078+0000 mgr.vm06.qbbldl (mgr.14229) 543 : audit [DBG] from='client.15702 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:53.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:53 vm06 bash[28114]: cluster 2026-04-15T13:42:52.160068+0000 mgr.vm06.qbbldl (mgr.14229) 544 : cluster [DBG] pgmap v292: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:42:53.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:53 vm06 bash[28114]: cluster 2026-04-15T13:42:52.160068+0000 mgr.vm06.qbbldl (mgr.14229) 544 : cluster [DBG] pgmap v292: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:42:53.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:53 vm09 bash[34466]: audit 2026-04-15T13:42:51.599078+0000 mgr.vm06.qbbldl (mgr.14229) 543 : audit [DBG] from='client.15702 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:53.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:53 vm09 bash[34466]: audit 2026-04-15T13:42:51.599078+0000 mgr.vm06.qbbldl (mgr.14229) 543 : audit [DBG] from='client.15702 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:53.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:53 vm09 bash[34466]: cluster 2026-04-15T13:42:52.160068+0000 mgr.vm06.qbbldl (mgr.14229) 544 : cluster [DBG] pgmap v292: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:42:53.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:53 vm09 bash[34466]: cluster 2026-04-15T13:42:52.160068+0000 mgr.vm06.qbbldl (mgr.14229) 
544 : cluster [DBG] pgmap v292: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:42:54.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:54 vm09 bash[34466]: audit 2026-04-15T13:42:53.488376+0000 mon.vm06 (mon.0) 1114 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:42:54.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:54 vm09 bash[34466]: audit 2026-04-15T13:42:53.488376+0000 mon.vm06 (mon.0) 1114 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:42:54.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:54 vm06 bash[28114]: audit 2026-04-15T13:42:53.488376+0000 mon.vm06 (mon.0) 1114 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:42:54.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:54 vm06 bash[28114]: audit 2026-04-15T13:42:53.488376+0000 mon.vm06 (mon.0) 1114 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:42:55.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:55 vm09 bash[34466]: cluster 2026-04-15T13:42:54.160576+0000 mgr.vm06.qbbldl (mgr.14229) 545 : cluster [DBG] pgmap v293: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:42:55.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:55 vm09 bash[34466]: cluster 2026-04-15T13:42:54.160576+0000 mgr.vm06.qbbldl (mgr.14229) 545 : cluster [DBG] pgmap v293: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:42:55.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:55 vm06 bash[28114]: cluster 2026-04-15T13:42:54.160576+0000 mgr.vm06.qbbldl (mgr.14229) 545 : cluster [DBG] pgmap v293: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:42:55.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:55 vm06 bash[28114]: cluster 2026-04-15T13:42:54.160576+0000 mgr.vm06.qbbldl (mgr.14229) 545 : cluster [DBG] pgmap v293: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:42:57.068 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop 2026-04-15T13:42:57.258 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:42:57.258 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (6m) 74s ago 7m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:42:57.258 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (80s) 74s ago 7m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:42:57.258 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 53s ago 7m - - 2026-04-15T13:42:57.258 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (7m) 53s ago 7m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:42:57.493 
INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:42:57.493 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:42:57.493 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state 2026-04-15T13:42:57.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:57 vm09 bash[34466]: cluster 2026-04-15T13:42:56.161055+0000 mgr.vm06.qbbldl (mgr.14229) 546 : cluster [DBG] pgmap v294: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:42:57.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:57 vm09 bash[34466]: cluster 2026-04-15T13:42:56.161055+0000 mgr.vm06.qbbldl (mgr.14229) 546 : cluster [DBG] pgmap v294: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:42:57.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:57 vm06 bash[28114]: cluster 2026-04-15T13:42:56.161055+0000 mgr.vm06.qbbldl (mgr.14229) 546 : cluster [DBG] pgmap v294: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:42:57.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:57 vm06 bash[28114]: cluster 2026-04-15T13:42:56.161055+0000 mgr.vm06.qbbldl (mgr.14229) 546 : cluster [DBG] pgmap v294: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:42:58.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:58 vm09 bash[34466]: audit 2026-04-15T13:42:57.045513+0000 mgr.vm06.qbbldl (mgr.14229) 547 : audit [DBG] from='client.15710 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:58.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:58 vm09 bash[34466]: audit 2026-04-15T13:42:57.045513+0000 mgr.vm06.qbbldl (mgr.14229) 547 : audit [DBG] from='client.15710 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:58.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:58 vm09 bash[34466]: audit 2026-04-15T13:42:57.252668+0000 mgr.vm06.qbbldl (mgr.14229) 548 : audit [DBG] from='client.15714 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:58.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:58 vm09 bash[34466]: audit 2026-04-15T13:42:57.252668+0000 mgr.vm06.qbbldl (mgr.14229) 548 : audit [DBG] from='client.15714 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:58.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:58 vm09 bash[34466]: audit 2026-04-15T13:42:57.490433+0000 mon.vm06 (mon.0) 1115 : audit [DBG] from='client.? 192.168.123.106:0/2670368659' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:42:58.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:58 vm09 bash[34466]: audit 2026-04-15T13:42:57.490433+0000 mon.vm06 (mon.0) 1115 : audit [DBG] from='client.? 
192.168.123.106:0/2670368659' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:42:58.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:58 vm06 bash[28114]: audit 2026-04-15T13:42:57.045513+0000 mgr.vm06.qbbldl (mgr.14229) 547 : audit [DBG] from='client.15710 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:58.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:58 vm06 bash[28114]: audit 2026-04-15T13:42:57.045513+0000 mgr.vm06.qbbldl (mgr.14229) 547 : audit [DBG] from='client.15710 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:58.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:58 vm06 bash[28114]: audit 2026-04-15T13:42:57.252668+0000 mgr.vm06.qbbldl (mgr.14229) 548 : audit [DBG] from='client.15714 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:58.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:58 vm06 bash[28114]: audit 2026-04-15T13:42:57.252668+0000 mgr.vm06.qbbldl (mgr.14229) 548 : audit [DBG] from='client.15714 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:42:58.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:58 vm06 bash[28114]: audit 2026-04-15T13:42:57.490433+0000 mon.vm06 (mon.0) 1115 : audit [DBG] from='client.? 192.168.123.106:0/2670368659' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:42:58.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:58 vm06 bash[28114]: audit 2026-04-15T13:42:57.490433+0000 mon.vm06 (mon.0) 1115 : audit [DBG] from='client.? 
192.168.123.106:0/2670368659' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:42:59.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:59 vm09 bash[34466]: cluster 2026-04-15T13:42:58.161410+0000 mgr.vm06.qbbldl (mgr.14229) 549 : cluster [DBG] pgmap v295: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:42:59.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:42:59 vm09 bash[34466]: cluster 2026-04-15T13:42:58.161410+0000 mgr.vm06.qbbldl (mgr.14229) 549 : cluster [DBG] pgmap v295: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:42:59.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:59 vm06 bash[28114]: cluster 2026-04-15T13:42:58.161410+0000 mgr.vm06.qbbldl (mgr.14229) 549 : cluster [DBG] pgmap v295: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:42:59.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:42:59 vm06 bash[28114]: cluster 2026-04-15T13:42:58.161410+0000 mgr.vm06.qbbldl (mgr.14229) 549 : cluster [DBG] pgmap v295: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:01.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:01 vm09 bash[34466]: cluster 2026-04-15T13:43:00.161840+0000 mgr.vm06.qbbldl (mgr.14229) 550 : cluster [DBG] pgmap v296: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:01.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:01 vm09 bash[34466]: cluster 2026-04-15T13:43:00.161840+0000 mgr.vm06.qbbldl (mgr.14229) 550 : cluster [DBG] pgmap v296: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:01.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:01 vm06 bash[28114]: cluster 2026-04-15T13:43:00.161840+0000 mgr.vm06.qbbldl (mgr.14229) 550 : cluster [DBG] pgmap v296: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:01.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:01 vm06 bash[28114]: cluster 2026-04-15T13:43:00.161840+0000 mgr.vm06.qbbldl (mgr.14229) 550 : cluster [DBG] pgmap v296: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:02.709 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop 2026-04-15T13:43:02.898 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:43:02.898 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (6m) 80s ago 7m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:43:02.898 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (85s) 80s ago 7m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:43:02.898 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 58s ago 7m - - 2026-04-15T13:43:02.898 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (7m) 58s ago 7m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:43:03.130 
INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:43:03.130 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:43:03.130 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state 2026-04-15T13:43:03.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:03 vm09 bash[34466]: cluster 2026-04-15T13:43:02.162233+0000 mgr.vm06.qbbldl (mgr.14229) 551 : cluster [DBG] pgmap v297: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:03.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:03 vm09 bash[34466]: cluster 2026-04-15T13:43:02.162233+0000 mgr.vm06.qbbldl (mgr.14229) 551 : cluster [DBG] pgmap v297: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:03.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:03 vm09 bash[34466]: audit 2026-04-15T13:43:03.126912+0000 mon.vm06 (mon.0) 1116 : audit [DBG] from='client.? 192.168.123.106:0/2457319601' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:43:03.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:03 vm09 bash[34466]: audit 2026-04-15T13:43:03.126912+0000 mon.vm06 (mon.0) 1116 : audit [DBG] from='client.? 192.168.123.106:0/2457319601' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:43:03.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:03 vm06 bash[28114]: cluster 2026-04-15T13:43:02.162233+0000 mgr.vm06.qbbldl (mgr.14229) 551 : cluster [DBG] pgmap v297: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:03.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:03 vm06 bash[28114]: cluster 2026-04-15T13:43:02.162233+0000 mgr.vm06.qbbldl (mgr.14229) 551 : cluster [DBG] pgmap v297: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:03.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:03 vm06 bash[28114]: audit 2026-04-15T13:43:03.126912+0000 mon.vm06 (mon.0) 1116 : audit [DBG] from='client.? 192.168.123.106:0/2457319601' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:43:03.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:03 vm06 bash[28114]: audit 2026-04-15T13:43:03.126912+0000 mon.vm06 (mon.0) 1116 : audit [DBG] from='client.? 
192.168.123.106:0/2457319601' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:43:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:04 vm09 bash[34466]: audit 2026-04-15T13:43:02.688305+0000 mgr.vm06.qbbldl (mgr.14229) 552 : audit [DBG] from='client.15722 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:04 vm09 bash[34466]: audit 2026-04-15T13:43:02.688305+0000 mgr.vm06.qbbldl (mgr.14229) 552 : audit [DBG] from='client.15722 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:04 vm09 bash[34466]: audit 2026-04-15T13:43:02.891882+0000 mgr.vm06.qbbldl (mgr.14229) 553 : audit [DBG] from='client.25141 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:04 vm09 bash[34466]: audit 2026-04-15T13:43:02.891882+0000 mgr.vm06.qbbldl (mgr.14229) 553 : audit [DBG] from='client.25141 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:04 vm09 bash[34466]: audit 2026-04-15T13:43:04.167658+0000 mon.vm06 (mon.0) 1117 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:43:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:04 vm09 bash[34466]: audit 2026-04-15T13:43:04.167658+0000 mon.vm06 (mon.0) 1117 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:43:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:04 vm06 bash[28114]: audit 2026-04-15T13:43:02.688305+0000 mgr.vm06.qbbldl (mgr.14229) 552 : audit [DBG] from='client.15722 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:04 vm06 bash[28114]: audit 2026-04-15T13:43:02.688305+0000 mgr.vm06.qbbldl (mgr.14229) 552 : audit [DBG] from='client.15722 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:04 vm06 bash[28114]: audit 2026-04-15T13:43:02.891882+0000 mgr.vm06.qbbldl (mgr.14229) 553 : audit [DBG] from='client.25141 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:04 vm06 bash[28114]: audit 2026-04-15T13:43:02.891882+0000 mgr.vm06.qbbldl (mgr.14229) 553 : audit [DBG] from='client.25141 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:04 vm06 bash[28114]: audit 2026-04-15T13:43:04.167658+0000 mon.vm06 (mon.0) 1117 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:43:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:04 vm06 bash[28114]: 
audit 2026-04-15T13:43:04.167658+0000 mon.vm06 (mon.0) 1117 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:43:05.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:05 vm09 bash[34466]: cluster 2026-04-15T13:43:04.162631+0000 mgr.vm06.qbbldl (mgr.14229) 554 : cluster [DBG] pgmap v298: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:05.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:05 vm09 bash[34466]: cluster 2026-04-15T13:43:04.162631+0000 mgr.vm06.qbbldl (mgr.14229) 554 : cluster [DBG] pgmap v298: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:05.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:05 vm09 bash[34466]: audit 2026-04-15T13:43:04.525889+0000 mon.vm06 (mon.0) 1118 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:43:05.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:05 vm09 bash[34466]: audit 2026-04-15T13:43:04.525889+0000 mon.vm06 (mon.0) 1118 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:43:05.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:05 vm09 bash[34466]: audit 2026-04-15T13:43:04.526561+0000 mon.vm06 (mon.0) 1119 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:43:05.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:05 vm09 bash[34466]: audit 2026-04-15T13:43:04.526561+0000 mon.vm06 (mon.0) 1119 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:43:05.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:05 vm09 bash[34466]: audit 2026-04-15T13:43:04.532007+0000 mon.vm06 (mon.0) 1120 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:43:05.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:05 vm09 bash[34466]: audit 2026-04-15T13:43:04.532007+0000 mon.vm06 (mon.0) 1120 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:43:05.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:05 vm09 bash[34466]: audit 2026-04-15T13:43:04.533486+0000 mon.vm06 (mon.0) 1121 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:43:05.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:05 vm09 bash[34466]: audit 2026-04-15T13:43:04.533486+0000 mon.vm06 (mon.0) 1121 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:43:05.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:05 vm06 bash[28114]: cluster 2026-04-15T13:43:04.162631+0000 mgr.vm06.qbbldl (mgr.14229) 554 : cluster [DBG] pgmap v298: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:05.766 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:05 vm06 bash[28114]: cluster 2026-04-15T13:43:04.162631+0000 mgr.vm06.qbbldl (mgr.14229) 554 : cluster [DBG] pgmap v298: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:05.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:05 vm06 bash[28114]: audit 2026-04-15T13:43:04.525889+0000 mon.vm06 (mon.0) 1118 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:43:05.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:05 vm06 bash[28114]: audit 2026-04-15T13:43:04.525889+0000 mon.vm06 (mon.0) 1118 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:43:05.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:05 vm06 bash[28114]: audit 2026-04-15T13:43:04.526561+0000 mon.vm06 (mon.0) 1119 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:43:05.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:05 vm06 bash[28114]: audit 2026-04-15T13:43:04.526561+0000 mon.vm06 (mon.0) 1119 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:43:05.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:05 vm06 bash[28114]: audit 2026-04-15T13:43:04.532007+0000 mon.vm06 (mon.0) 1120 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:43:05.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:05 vm06 bash[28114]: audit 2026-04-15T13:43:04.532007+0000 mon.vm06 (mon.0) 1120 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:43:05.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:05 vm06 bash[28114]: audit 2026-04-15T13:43:04.533486+0000 mon.vm06 (mon.0) 1121 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:43:05.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:05 vm06 bash[28114]: audit 2026-04-15T13:43:04.533486+0000 mon.vm06 (mon.0) 1121 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:43:06.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:06 vm09 bash[34466]: cluster 2026-04-15T13:43:04.527519+0000 mgr.vm06.qbbldl (mgr.14229) 555 : cluster [DBG] pgmap v299: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:43:06.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:06 vm09 bash[34466]: cluster 2026-04-15T13:43:04.527519+0000 mgr.vm06.qbbldl (mgr.14229) 555 : cluster [DBG] pgmap v299: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:43:06.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:06 vm09 bash[34466]: cluster 2026-04-15T13:43:04.527648+0000 mgr.vm06.qbbldl (mgr.14229) 556 : cluster [DBG] pgmap v300: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:43:06.608 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:06 vm09 bash[34466]: cluster 2026-04-15T13:43:04.527648+0000 mgr.vm06.qbbldl (mgr.14229) 556 : cluster [DBG] pgmap v300: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:43:06.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:06 vm06 bash[28114]: cluster 2026-04-15T13:43:04.527519+0000 mgr.vm06.qbbldl (mgr.14229) 555 : cluster [DBG] pgmap v299: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:43:06.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:06 vm06 bash[28114]: cluster 2026-04-15T13:43:04.527519+0000 mgr.vm06.qbbldl (mgr.14229) 555 : cluster [DBG] pgmap v299: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:43:06.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:06 vm06 bash[28114]: cluster 2026-04-15T13:43:04.527648+0000 mgr.vm06.qbbldl (mgr.14229) 556 : cluster [DBG] pgmap v300: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:43:06.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:06 vm06 bash[28114]: cluster 2026-04-15T13:43:04.527648+0000 mgr.vm06.qbbldl (mgr.14229) 556 : cluster [DBG] pgmap v300: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:43:08.357 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop 2026-04-15T13:43:08.555 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:43:08.555 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (6m) 85s ago 7m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:43:08.555 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (91s) 85s ago 7m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:43:08.555 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 64s ago 7m - - 2026-04-15T13:43:08.555 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (7m) 64s ago 7m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:43:08.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:08 vm09 bash[34466]: cluster 2026-04-15T13:43:06.527998+0000 mgr.vm06.qbbldl (mgr.14229) 557 : cluster [DBG] pgmap v301: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:43:08.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:08 vm09 bash[34466]: cluster 2026-04-15T13:43:06.527998+0000 mgr.vm06.qbbldl (mgr.14229) 557 : cluster [DBG] pgmap v301: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:43:08.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:08 vm06 bash[28114]: cluster 2026-04-15T13:43:06.527998+0000 mgr.vm06.qbbldl (mgr.14229) 557 : cluster [DBG] pgmap v301: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:43:08.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:08 vm06 bash[28114]: cluster 2026-04-15T13:43:06.527998+0000 mgr.vm06.qbbldl (mgr.14229) 557 : cluster [DBG] pgmap v301: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:43:08.791 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:43:08.791 
INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:43:08.791 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state 2026-04-15T13:43:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:09 vm09 bash[34466]: audit 2026-04-15T13:43:08.337469+0000 mgr.vm06.qbbldl (mgr.14229) 558 : audit [DBG] from='client.15734 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:09 vm09 bash[34466]: audit 2026-04-15T13:43:08.337469+0000 mgr.vm06.qbbldl (mgr.14229) 558 : audit [DBG] from='client.15734 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:09 vm09 bash[34466]: audit 2026-04-15T13:43:08.488530+0000 mon.vm06 (mon.0) 1122 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:43:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:09 vm09 bash[34466]: audit 2026-04-15T13:43:08.488530+0000 mon.vm06 (mon.0) 1122 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:43:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:09 vm09 bash[34466]: audit 2026-04-15T13:43:08.788099+0000 mon.vm06 (mon.0) 1123 : audit [DBG] from='client.? 192.168.123.106:0/26409556' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:43:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:09 vm09 bash[34466]: audit 2026-04-15T13:43:08.788099+0000 mon.vm06 (mon.0) 1123 : audit [DBG] from='client.? 192.168.123.106:0/26409556' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:43:09.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:09 vm06 bash[28114]: audit 2026-04-15T13:43:08.337469+0000 mgr.vm06.qbbldl (mgr.14229) 558 : audit [DBG] from='client.15734 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:09.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:09 vm06 bash[28114]: audit 2026-04-15T13:43:08.337469+0000 mgr.vm06.qbbldl (mgr.14229) 558 : audit [DBG] from='client.15734 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:09.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:09 vm06 bash[28114]: audit 2026-04-15T13:43:08.488530+0000 mon.vm06 (mon.0) 1122 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:43:09.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:09 vm06 bash[28114]: audit 2026-04-15T13:43:08.488530+0000 mon.vm06 (mon.0) 1122 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:43:09.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:09 vm06 bash[28114]: audit 2026-04-15T13:43:08.788099+0000 mon.vm06 (mon.0) 1123 : audit [DBG] from='client.? 
192.168.123.106:0/26409556' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:43:09.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:09 vm06 bash[28114]: audit 2026-04-15T13:43:08.788099+0000 mon.vm06 (mon.0) 1123 : audit [DBG] from='client.? 192.168.123.106:0/26409556' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:43:10.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:10 vm09 bash[34466]: cluster 2026-04-15T13:43:08.528357+0000 mgr.vm06.qbbldl (mgr.14229) 559 : cluster [DBG] pgmap v302: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:43:10.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:10 vm09 bash[34466]: cluster 2026-04-15T13:43:08.528357+0000 mgr.vm06.qbbldl (mgr.14229) 559 : cluster [DBG] pgmap v302: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:43:10.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:10 vm09 bash[34466]: audit 2026-04-15T13:43:08.549793+0000 mgr.vm06.qbbldl (mgr.14229) 560 : audit [DBG] from='client.15738 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:10.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:10 vm09 bash[34466]: audit 2026-04-15T13:43:08.549793+0000 mgr.vm06.qbbldl (mgr.14229) 560 : audit [DBG] from='client.15738 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:10.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:10 vm06 bash[28114]: cluster 2026-04-15T13:43:08.528357+0000 mgr.vm06.qbbldl (mgr.14229) 559 : cluster [DBG] pgmap v302: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:43:10.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:10 vm06 bash[28114]: cluster 2026-04-15T13:43:08.528357+0000 mgr.vm06.qbbldl (mgr.14229) 559 : cluster [DBG] pgmap v302: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:43:10.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:10 vm06 bash[28114]: audit 2026-04-15T13:43:08.549793+0000 mgr.vm06.qbbldl (mgr.14229) 560 : audit [DBG] from='client.15738 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:10.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:10 vm06 bash[28114]: audit 2026-04-15T13:43:08.549793+0000 mgr.vm06.qbbldl (mgr.14229) 560 : audit [DBG] from='client.15738 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:12.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:12 vm09 bash[34466]: cluster 2026-04-15T13:43:10.528849+0000 mgr.vm06.qbbldl (mgr.14229) 561 : cluster [DBG] pgmap v303: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 244 B/s rd, 489 B/s wr, 0 op/s 2026-04-15T13:43:12.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:12 vm09 bash[34466]: cluster 2026-04-15T13:43:10.528849+0000 mgr.vm06.qbbldl (mgr.14229) 561 : cluster [DBG] pgmap v303: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 244 B/s rd, 489 B/s wr, 0 op/s 2026-04-15T13:43:12.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:12 vm06 bash[28114]: cluster 2026-04-15T13:43:10.528849+0000 mgr.vm06.qbbldl 
(mgr.14229) 561 : cluster [DBG] pgmap v303: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 244 B/s rd, 489 B/s wr, 0 op/s 2026-04-15T13:43:12.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:12 vm06 bash[28114]: cluster 2026-04-15T13:43:10.528849+0000 mgr.vm06.qbbldl (mgr.14229) 561 : cluster [DBG] pgmap v303: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 244 B/s rd, 489 B/s wr, 0 op/s 2026-04-15T13:43:14.010 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop 2026-04-15T13:43:14.197 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:43:14.197 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (6m) 91s ago 7m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:43:14.197 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (96s) 91s ago 7m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:43:14.197 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 70s ago 7m - - 2026-04-15T13:43:14.197 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (7m) 70s ago 7m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:43:14.451 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:43:14.451 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:43:14.451 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state 2026-04-15T13:43:14.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:14 vm06 bash[28114]: cluster 2026-04-15T13:43:12.529203+0000 mgr.vm06.qbbldl (mgr.14229) 562 : cluster [DBG] pgmap v304: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 244 B/s rd, 489 B/s wr, 0 op/s 2026-04-15T13:43:14.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:14 vm06 bash[28114]: cluster 2026-04-15T13:43:12.529203+0000 mgr.vm06.qbbldl (mgr.14229) 562 : cluster [DBG] pgmap v304: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 244 B/s rd, 489 B/s wr, 0 op/s 2026-04-15T13:43:14.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:14 vm09 bash[34466]: cluster 2026-04-15T13:43:12.529203+0000 mgr.vm06.qbbldl (mgr.14229) 562 : cluster [DBG] pgmap v304: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 244 B/s rd, 489 B/s wr, 0 op/s 2026-04-15T13:43:14.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:14 vm09 bash[34466]: cluster 2026-04-15T13:43:12.529203+0000 mgr.vm06.qbbldl (mgr.14229) 562 : cluster [DBG] pgmap v304: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 244 B/s rd, 489 B/s wr, 0 op/s 2026-04-15T13:43:15.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:15 vm06 bash[28114]: audit 2026-04-15T13:43:13.989629+0000 mgr.vm06.qbbldl (mgr.14229) 563 : audit [DBG] from='client.15746 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:15.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:15 vm06 bash[28114]: audit 2026-04-15T13:43:13.989629+0000 mgr.vm06.qbbldl (mgr.14229) 563 : audit [DBG] from='client.15746 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 
2026-04-15T13:43:15.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:15 vm06 bash[28114]: audit 2026-04-15T13:43:14.191167+0000 mgr.vm06.qbbldl (mgr.14229) 564 : audit [DBG] from='client.15750 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:15.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:15 vm06 bash[28114]: audit 2026-04-15T13:43:14.191167+0000 mgr.vm06.qbbldl (mgr.14229) 564 : audit [DBG] from='client.15750 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:15.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:15 vm06 bash[28114]: audit 2026-04-15T13:43:14.447999+0000 mon.vm06 (mon.0) 1124 : audit [DBG] from='client.? 192.168.123.106:0/2726978261' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:43:15.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:15 vm06 bash[28114]: audit 2026-04-15T13:43:14.447999+0000 mon.vm06 (mon.0) 1124 : audit [DBG] from='client.? 192.168.123.106:0/2726978261' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:43:15.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:15 vm09 bash[34466]: audit 2026-04-15T13:43:13.989629+0000 mgr.vm06.qbbldl (mgr.14229) 563 : audit [DBG] from='client.15746 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:15.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:15 vm09 bash[34466]: audit 2026-04-15T13:43:13.989629+0000 mgr.vm06.qbbldl (mgr.14229) 563 : audit [DBG] from='client.15746 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:15.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:15 vm09 bash[34466]: audit 2026-04-15T13:43:14.191167+0000 mgr.vm06.qbbldl (mgr.14229) 564 : audit [DBG] from='client.15750 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:15.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:15 vm09 bash[34466]: audit 2026-04-15T13:43:14.191167+0000 mgr.vm06.qbbldl (mgr.14229) 564 : audit [DBG] from='client.15750 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:43:15.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:15 vm09 bash[34466]: audit 2026-04-15T13:43:14.447999+0000 mon.vm06 (mon.0) 1124 : audit [DBG] from='client.? 192.168.123.106:0/2726978261' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:43:15.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:15 vm09 bash[34466]: audit 2026-04-15T13:43:14.447999+0000 mon.vm06 (mon.0) 1124 : audit [DBG] from='client.? 
192.168.123.106:0/2726978261' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:43:16.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:16 vm06 bash[28114]: cluster 2026-04-15T13:43:14.529648+0000 mgr.vm06.qbbldl (mgr.14229) 565 : cluster [DBG] pgmap v305: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 2026-04-15T13:43:16.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:16 vm06 bash[28114]: cluster 2026-04-15T13:43:14.529648+0000 mgr.vm06.qbbldl (mgr.14229) 565 : cluster [DBG] pgmap v305: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 2026-04-15T13:43:16.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:16 vm09 bash[34466]: cluster 2026-04-15T13:43:14.529648+0000 mgr.vm06.qbbldl (mgr.14229) 565 : cluster [DBG] pgmap v305: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 2026-04-15T13:43:16.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:16 vm09 bash[34466]: cluster 2026-04-15T13:43:14.529648+0000 mgr.vm06.qbbldl (mgr.14229) 565 : cluster [DBG] pgmap v305: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 2026-04-15T13:43:18.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:18 vm06 bash[28114]: cluster 2026-04-15T13:43:16.530152+0000 mgr.vm06.qbbldl (mgr.14229) 566 : cluster [DBG] pgmap v306: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:43:18.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:18 vm06 bash[28114]: cluster 2026-04-15T13:43:16.530152+0000 mgr.vm06.qbbldl (mgr.14229) 566 : cluster [DBG] pgmap v306: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:43:18.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:18 vm09 bash[34466]: cluster 2026-04-15T13:43:16.530152+0000 mgr.vm06.qbbldl (mgr.14229) 566 : cluster [DBG] pgmap v306: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:43:18.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:18 vm09 bash[34466]: cluster 2026-04-15T13:43:16.530152+0000 mgr.vm06.qbbldl (mgr.14229) 566 : cluster [DBG] pgmap v306: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:43:19.663 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop 2026-04-15T13:43:19.866 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:43:19.867 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (7m) 96s ago 7m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:43:19.867 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (102s) 96s ago 7m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:43:19.867 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 75s ago 7m - - 2026-04-15T13:43:19.867 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (7m) 75s ago 7m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:43:20.126 
2026-04-15T13:43:20.126 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:43:20.126 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:43:20.126 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:43:20.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:20 vm06 bash[28114]: cluster 2026-04-15T13:43:18.530669+0000 mgr.vm06.qbbldl (mgr.14229) 567 : cluster [DBG] pgmap v307: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:43:20.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:20 vm06 bash[28114]: audit 2026-04-15T13:43:20.123164+0000 mon.vm06 (mon.0) 1125 : audit [DBG] from='client.? 192.168.123.106:0/3173804121' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:43:20.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:20 vm09 bash[34466]: cluster 2026-04-15T13:43:18.530669+0000 mgr.vm06.qbbldl (mgr.14229) 567 : cluster [DBG] pgmap v307: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:43:20.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:20 vm09 bash[34466]: audit 2026-04-15T13:43:20.123164+0000 mon.vm06 (mon.0) 1125 : audit [DBG] from='client.? 192.168.123.106:0/3173804121' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:43:21.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:21 vm06 bash[28114]: audit 2026-04-15T13:43:19.642884+0000 mgr.vm06.qbbldl (mgr.14229) 568 : audit [DBG] from='client.15758 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:21.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:21 vm06 bash[28114]: audit 2026-04-15T13:43:19.860713+0000 mgr.vm06.qbbldl (mgr.14229) 569 : audit [DBG] from='client.15762 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:21.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:21 vm09 bash[34466]: audit 2026-04-15T13:43:19.642884+0000 mgr.vm06.qbbldl (mgr.14229) 568 : audit [DBG] from='client.15758 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:21.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:21 vm09 bash[34466]: audit 2026-04-15T13:43:19.860713+0000 mgr.vm06.qbbldl (mgr.14229) 569 : audit [DBG] from='client.15762 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:22.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:22 vm06 bash[28114]: cluster 2026-04-15T13:43:20.531187+0000 mgr.vm06.qbbldl (mgr.14229) 570 : cluster [DBG] pgmap v308: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:43:22.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:22 vm09 bash[34466]: cluster 2026-04-15T13:43:20.531187+0000 mgr.vm06.qbbldl (mgr.14229) 570 : cluster [DBG] pgmap v308: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:43:24.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:24 vm06 bash[28114]: cluster 2026-04-15T13:43:22.531628+0000 mgr.vm06.qbbldl (mgr.14229) 571 : cluster [DBG] pgmap v309: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:43:24.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:24 vm06 bash[28114]: audit 2026-04-15T13:43:23.488907+0000 mon.vm06 (mon.0) 1126 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:43:24.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:24 vm09 bash[34466]: cluster 2026-04-15T13:43:22.531628+0000 mgr.vm06.qbbldl (mgr.14229) 571 : cluster [DBG] pgmap v309: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:43:24.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:24 vm09 bash[34466]: audit 2026-04-15T13:43:23.488907+0000 mon.vm06 (mon.0) 1126 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:43:25.339 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:43:25.548 INFO:teuthology.orchestra.run.vm06.stdout:NAME                 HOST  PORTS   STATUS          REFRESHED  AGE  MEM USE  MEM LIM  VERSION                 IMAGE ID      CONTAINER ID
2026-04-15T13:43:25.548 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug  vm06  *:8001  running (7m)    102s ago   7m   117M     -        20.2.0-19-g7ec4401a095  b4cb326006c0  83775e5cd19f
2026-04-15T13:43:25.548 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd  vm06  *:8000  running (108s)  102s ago   7m   92.5M    -        20.2.0-19-g7ec4401a095  b4cb326006c0  57a0cf0484de
2026-04-15T13:43:25.548 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg  vm09  *:8000  error           81s ago    7m   -        -
2026-04-15T13:43:25.548 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu  vm09  *:8001  running (7m)    81s ago    7m   122M     -        20.2.0-19-g7ec4401a095  b4cb326006c0  b26b2a80bc96
2026-04-15T13:43:25.798 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:43:25.798 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:43:25.798 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:43:26.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:26 vm06 bash[28114]: cluster 2026-04-15T13:43:24.532043+0000 mgr.vm06.qbbldl (mgr.14229) 572 : cluster [DBG] pgmap v310: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:43:26.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:26 vm06 bash[28114]: audit 2026-04-15T13:43:25.321760+0000 mgr.vm06.qbbldl (mgr.14229) 573 : audit [DBG] from='client.15770 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:26.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:26 vm06 bash[28114]: audit 2026-04-15T13:43:25.794705+0000 mon.vm06 (mon.0) 1127 : audit [DBG] from='client.? 192.168.123.106:0/199843392' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:43:26.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:26 vm09 bash[34466]: cluster 2026-04-15T13:43:24.532043+0000 mgr.vm06.qbbldl (mgr.14229) 572 : cluster [DBG] pgmap v310: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:43:26.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:26 vm09 bash[34466]: cluster 2026-04-15T13:43:24.532043+0000 mgr.vm06.qbbldl (mgr.14229) 572 : cluster [DBG] pgmap v310: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:43:26.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:26 vm09 bash[34466]: audit 2026-04-15T13:43:25.321760+0000 mgr.vm06.qbbldl (mgr.14229) 573 : audit [DBG] from='client.15770 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:26.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:26 vm09 bash[34466]: audit 2026-04-15T13:43:25.794705+0000 mon.vm06 (mon.0) 1127 : audit [DBG] from='client.? 192.168.123.106:0/199843392' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:43:27.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:27 vm06 bash[28114]: audit 2026-04-15T13:43:25.541953+0000 mgr.vm06.qbbldl (mgr.14229) 574 : audit [DBG] from='client.15774 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:27.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:27 vm09 bash[34466]: audit 2026-04-15T13:43:25.541953+0000 mgr.vm06.qbbldl (mgr.14229) 574 : audit [DBG] from='client.15774 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:27.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:27 vm09 bash[34466]: audit 2026-04-15T13:43:25.541953+0000 mgr.vm06.qbbldl (mgr.14229) 574 : audit [DBG] from='client.15774 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:28.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:28 vm06 bash[28114]: cluster 2026-04-15T13:43:26.532602+0000 mgr.vm06.qbbldl (mgr.14229) 575 : cluster [DBG] pgmap v311: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:43:28.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:28 vm09 bash[34466]: cluster 2026-04-15T13:43:26.532602+0000 mgr.vm06.qbbldl (mgr.14229) 575 : cluster [DBG] pgmap v311: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:43:29.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:29 vm06 bash[28114]: cluster 2026-04-15T13:43:28.533035+0000 mgr.vm06.qbbldl (mgr.14229) 576 : cluster [DBG] pgmap v312: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:43:29.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:29 vm09 bash[34466]: cluster 2026-04-15T13:43:28.533035+0000 mgr.vm06.qbbldl (mgr.14229) 576 : cluster [DBG] pgmap v312: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:43:31.024 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:43:31.231 INFO:teuthology.orchestra.run.vm06.stdout:NAME                 HOST  PORTS   STATUS          REFRESHED  AGE  MEM USE  MEM LIM  VERSION                 IMAGE ID      CONTAINER ID
2026-04-15T13:43:31.231 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug  vm06  *:8001  running (7m)    108s ago   8m   117M     -        20.2.0-19-g7ec4401a095  b4cb326006c0  83775e5cd19f
2026-04-15T13:43:31.231 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd  vm06  *:8000  running (113s)  108s ago   8m   92.5M    -        20.2.0-19-g7ec4401a095  b4cb326006c0  57a0cf0484de
2026-04-15T13:43:31.231 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg  vm09  *:8000  error           87s ago    8m   -        -
2026-04-15T13:43:31.231 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu  vm09  *:8001  running (8m)    87s ago    8m   122M     -        20.2.0-19-g7ec4401a095  b4cb326006c0  b26b2a80bc96
2026-04-15T13:43:31.496 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:43:31.496 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:43:31.496 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:43:32.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:31 vm06 bash[28114]: cluster 2026-04-15T13:43:30.533527+0000 mgr.vm06.qbbldl (mgr.14229) 577 : cluster [DBG] pgmap v313: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:43:32.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:31 vm06 bash[28114]: audit 2026-04-15T13:43:30.997782+0000 mgr.vm06.qbbldl (mgr.14229) 578 : audit [DBG] from='client.15782 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:32.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:31 vm06 bash[28114]: audit 2026-04-15T13:43:31.224751+0000 mgr.vm06.qbbldl (mgr.14229) 579 : audit [DBG] from='client.15786 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:32.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:31 vm06 bash[28114]: audit 2026-04-15T13:43:31.492428+0000 mon.vm06 (mon.0) 1128 : audit [DBG] from='client.? 192.168.123.106:0/1313528365' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:43:32.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:31 vm09 bash[34466]: cluster 2026-04-15T13:43:30.533527+0000 mgr.vm06.qbbldl (mgr.14229) 577 : cluster [DBG] pgmap v313: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:43:32.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:31 vm09 bash[34466]: audit 2026-04-15T13:43:30.997782+0000 mgr.vm06.qbbldl (mgr.14229) 578 : audit [DBG] from='client.15782 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:32.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:31 vm09 bash[34466]: audit 2026-04-15T13:43:31.224751+0000 mgr.vm06.qbbldl (mgr.14229) 579 : audit [DBG] from='client.15786 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:32.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:31 vm09 bash[34466]: audit 2026-04-15T13:43:31.492428+0000 mon.vm06 (mon.0) 1128 : audit [DBG] from='client.? 192.168.123.106:0/1313528365' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:43:34.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:33 vm06 bash[28114]: cluster 2026-04-15T13:43:32.533983+0000 mgr.vm06.qbbldl (mgr.14229) 580 : cluster [DBG] pgmap v314: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:43:34.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:33 vm09 bash[34466]: cluster 2026-04-15T13:43:32.533983+0000 mgr.vm06.qbbldl (mgr.14229) 580 : cluster [DBG] pgmap v314: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:43:36.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:35 vm06 bash[28114]: cluster 2026-04-15T13:43:34.534394+0000 mgr.vm06.qbbldl (mgr.14229) 581 : cluster [DBG] pgmap v315: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:43:36.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:35 vm09 bash[34466]: cluster 2026-04-15T13:43:34.534394+0000 mgr.vm06.qbbldl (mgr.14229) 581 : cluster [DBG] pgmap v315: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:43:36.713 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:43:36.917 INFO:teuthology.orchestra.run.vm06.stdout:NAME                 HOST  PORTS   STATUS          REFRESHED  AGE  MEM USE  MEM LIM  VERSION                 IMAGE ID      CONTAINER ID
2026-04-15T13:43:36.917 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug  vm06  *:8001  running (7m)    114s ago   8m   117M     -        20.2.0-19-g7ec4401a095  b4cb326006c0  83775e5cd19f
2026-04-15T13:43:36.917 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd  vm06  *:8000  running (119s)  114s ago   8m   92.5M    -        20.2.0-19-g7ec4401a095  b4cb326006c0  57a0cf0484de
2026-04-15T13:43:36.917 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg  vm09  *:8000  error           92s ago    8m   -        -
2026-04-15T13:43:36.917 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu  vm09  *:8001  running (8m)    92s ago    8m   122M     -        20.2.0-19-g7ec4401a095  b4cb326006c0  b26b2a80bc96
2026-04-15T13:43:37.154 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:43:37.154 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:43:37.155 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:43:38.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:37 vm06 bash[28114]: cluster 2026-04-15T13:43:36.534905+0000 mgr.vm06.qbbldl (mgr.14229) 582 : cluster [DBG] pgmap v316: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:43:38.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:37 vm06 bash[28114]: audit 2026-04-15T13:43:36.691906+0000 mgr.vm06.qbbldl (mgr.14229) 583 : audit [DBG] from='client.15794 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:38.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:37 vm06 bash[28114]: audit 2026-04-15T13:43:36.910801+0000 mgr.vm06.qbbldl (mgr.14229) 584 : audit [DBG] from='client.15798 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:38.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:37 vm06 bash[28114]: audit 2026-04-15T13:43:37.151329+0000 mon.vm06 (mon.0) 1129 : audit [DBG] from='client.? 192.168.123.106:0/3320762757' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:43:38.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:37 vm09 bash[34466]: cluster 2026-04-15T13:43:36.534905+0000 mgr.vm06.qbbldl (mgr.14229) 582 : cluster [DBG] pgmap v316: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:43:38.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:37 vm09 bash[34466]: audit 2026-04-15T13:43:36.691906+0000 mgr.vm06.qbbldl (mgr.14229) 583 : audit [DBG] from='client.15794 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:38.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:37 vm09 bash[34466]: audit 2026-04-15T13:43:36.910801+0000 mgr.vm06.qbbldl (mgr.14229) 584 : audit [DBG] from='client.15798 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:38.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:37 vm09 bash[34466]: audit 2026-04-15T13:43:37.151329+0000 mon.vm06 (mon.0) 1129 : audit [DBG] from='client.? 192.168.123.106:0/3320762757' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:43:39.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:38 vm06 bash[28114]: audit 2026-04-15T13:43:38.489213+0000 mon.vm06 (mon.0) 1130 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:43:39.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:38 vm09 bash[34466]: audit 2026-04-15T13:43:38.489213+0000 mon.vm06 (mon.0) 1130 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:43:40.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:39 vm06 bash[28114]: cluster 2026-04-15T13:43:38.535238+0000 mgr.vm06.qbbldl (mgr.14229) 585 : cluster [DBG] pgmap v317: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:43:40.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:39 vm09 bash[34466]: cluster 2026-04-15T13:43:38.535238+0000 mgr.vm06.qbbldl (mgr.14229) 585 : cluster [DBG] pgmap v317: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:43:42.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:41 vm06 bash[28114]: cluster 2026-04-15T13:43:40.535653+0000 mgr.vm06.qbbldl (mgr.14229) 586 : cluster [DBG] pgmap v318: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:43:42.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:41 vm09 bash[34466]: cluster 2026-04-15T13:43:40.535653+0000 mgr.vm06.qbbldl (mgr.14229) 586 : cluster [DBG] pgmap v318: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:43:42.369 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:43:42.565 INFO:teuthology.orchestra.run.vm06.stdout:NAME                 HOST  PORTS   STATUS          REFRESHED  AGE  MEM USE  MEM LIM  VERSION                 IMAGE ID      CONTAINER ID
2026-04-15T13:43:42.565 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug  vm06  *:8001  running (7m)    119s ago   8m   117M     -        20.2.0-19-g7ec4401a095  b4cb326006c0  83775e5cd19f
2026-04-15T13:43:42.565 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd  vm06  *:8000  running (2m)    119s ago   8m   92.5M    -        20.2.0-19-g7ec4401a095  b4cb326006c0  57a0cf0484de
2026-04-15T13:43:42.565 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg  vm09  *:8000  error           98s ago    8m   -        -
2026-04-15T13:43:42.565 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu  vm09  *:8001  running (8m)    98s ago    8m   122M     -        20.2.0-19-g7ec4401a095  b4cb326006c0  b26b2a80bc96
2026-04-15T13:43:42.823 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:43:42.823 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:43:42.823 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:43:43.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:42 vm06 bash[28114]: audit 2026-04-15T13:43:42.349035+0000 mgr.vm06.qbbldl (mgr.14229) 587 : audit [DBG] from='client.15806 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:43.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:42 vm09 bash[34466]: audit 2026-04-15T13:43:42.349035+0000 mgr.vm06.qbbldl (mgr.14229) 587 : audit [DBG] from='client.15806 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:44.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:43 vm06 bash[28114]: cluster 2026-04-15T13:43:42.536130+0000 mgr.vm06.qbbldl (mgr.14229) 588 : cluster [DBG] pgmap v319: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:43:44.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:43 vm06 bash[28114]: audit 2026-04-15T13:43:42.559076+0000 mgr.vm06.qbbldl (mgr.14229) 589 : audit [DBG] from='client.15810 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:44.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:43 vm06 bash[28114]: audit 2026-04-15T13:43:42.819714+0000 mon.vm06 (mon.0) 1131 : audit [DBG] from='client.? 192.168.123.106:0/1695814776' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:43:44.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:43 vm09 bash[34466]: cluster 2026-04-15T13:43:42.536130+0000 mgr.vm06.qbbldl (mgr.14229) 588 : cluster [DBG] pgmap v319: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:43:44.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:43 vm09 bash[34466]: audit 2026-04-15T13:43:42.559076+0000 mgr.vm06.qbbldl (mgr.14229) 589 : audit [DBG] from='client.15810 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:44.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:43 vm09 bash[34466]: audit 2026-04-15T13:43:42.819714+0000 mon.vm06 (mon.0) 1131 : audit [DBG] from='client.? 192.168.123.106:0/1695814776' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:43:46.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:45 vm06 bash[28114]: cluster 2026-04-15T13:43:44.536513+0000 mgr.vm06.qbbldl (mgr.14229) 590 : cluster [DBG] pgmap v320: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:43:46.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:45 vm09 bash[34466]: cluster 2026-04-15T13:43:44.536513+0000 mgr.vm06.qbbldl (mgr.14229) 590 : cluster [DBG] pgmap v320: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:43:48.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:47 vm06 bash[28114]: cluster 2026-04-15T13:43:46.537052+0000 mgr.vm06.qbbldl (mgr.14229) 591 : cluster [DBG] pgmap v321: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:43:48.023 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:43:48.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:47 vm09 bash[34466]: cluster 2026-04-15T13:43:46.537052+0000 mgr.vm06.qbbldl (mgr.14229) 591 : cluster [DBG] pgmap v321: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:43:48.213 INFO:teuthology.orchestra.run.vm06.stdout:NAME                 HOST  PORTS   STATUS          REFRESHED  AGE  MEM USE  MEM LIM  VERSION                 IMAGE ID      CONTAINER ID
2026-04-15T13:43:48.214 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug  vm06  *:8001  running (7m)    2m ago     8m   117M     -        20.2.0-19-g7ec4401a095  b4cb326006c0  83775e5cd19f
2026-04-15T13:43:48.214 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd  vm06  *:8000  running (2m)    2m ago     8m   92.5M    -        20.2.0-19-g7ec4401a095  b4cb326006c0  57a0cf0484de
2026-04-15T13:43:48.214 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg  vm09  *:8000  error           104s ago   8m   -        -
2026-04-15T13:43:48.214 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu  vm09  *:8001  running (8m)    104s ago   8m   122M     -        20.2.0-19-g7ec4401a095  b4cb326006c0  b26b2a80bc96
2026-04-15T13:43:48.453 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:43:48.453 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:43:48.453 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:43:49.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:48 vm06 bash[28114]: audit 2026-04-15T13:43:48.005465+0000 mgr.vm06.qbbldl (mgr.14229) 592 : audit [DBG] from='client.15818 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:49.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:48 vm06 bash[28114]: audit 2026-04-15T13:43:48.206448+0000 mgr.vm06.qbbldl (mgr.14229) 593 : audit [DBG] from='client.15822 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:49.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:48 vm06 bash[28114]: audit 2026-04-15T13:43:48.450030+0000 mon.vm06 (mon.0) 1132 : audit [DBG] from='client.? 192.168.123.106:0/1451289404' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:43:49.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:48 vm09 bash[34466]: audit 2026-04-15T13:43:48.005465+0000 mgr.vm06.qbbldl (mgr.14229) 592 : audit [DBG] from='client.15818 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:49.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:48 vm09 bash[34466]: audit 2026-04-15T13:43:48.005465+0000 mgr.vm06.qbbldl (mgr.14229) 592 : audit [DBG] from='client.15818 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:49.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:48 vm09 bash[34466]: audit 2026-04-15T13:43:48.206448+0000 mgr.vm06.qbbldl (mgr.14229) 593 : audit [DBG] from='client.15822 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:49.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:48 vm09 bash[34466]: audit 2026-04-15T13:43:48.450030+0000 mon.vm06 (mon.0) 1132 : audit [DBG] from='client.? 192.168.123.106:0/1451289404' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:43:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:49 vm06 bash[28114]: cluster 2026-04-15T13:43:48.537463+0000 mgr.vm06.qbbldl (mgr.14229) 594 : cluster [DBG] pgmap v322: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:43:50.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:49 vm09 bash[34466]: cluster 2026-04-15T13:43:48.537463+0000 mgr.vm06.qbbldl (mgr.14229) 594 : cluster [DBG] pgmap v322: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:43:50.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:49 vm09 bash[34466]: cluster 2026-04-15T13:43:48.537463+0000 mgr.vm06.qbbldl (mgr.14229) 594 : cluster [DBG] pgmap v322: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:43:52.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:51 vm06 bash[28114]: cluster 2026-04-15T13:43:50.537870+0000 mgr.vm06.qbbldl (mgr.14229) 595 : cluster [DBG] pgmap v323: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:43:52.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:51 vm09 bash[34466]: cluster 2026-04-15T13:43:50.537870+0000 mgr.vm06.qbbldl (mgr.14229) 595 : cluster [DBG] pgmap v323: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:43:53.662 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:43:53.857 INFO:teuthology.orchestra.run.vm06.stdout:NAME                 HOST  PORTS   STATUS          REFRESHED  AGE  MEM USE  MEM LIM  VERSION                 IMAGE ID      CONTAINER ID
2026-04-15T13:43:53.858 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug  vm06  *:8001  running (7m)    2m ago     8m   117M     -        20.2.0-19-g7ec4401a095  b4cb326006c0  83775e5cd19f
2026-04-15T13:43:53.858 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd  vm06  *:8000  running (2m)    2m ago     8m   92.5M    -        20.2.0-19-g7ec4401a095  b4cb326006c0  57a0cf0484de
2026-04-15T13:43:53.858 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg  vm09  *:8000  error           109s ago   8m   -        -
2026-04-15T13:43:53.858 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu  vm09  *:8001  running (8m)    109s ago   8m   122M     -        20.2.0-19-g7ec4401a095  b4cb326006c0  b26b2a80bc96
2026-04-15T13:43:54.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:53 vm06 bash[28114]: cluster 2026-04-15T13:43:52.538182+0000 mgr.vm06.qbbldl (mgr.14229) 596 : cluster [DBG] pgmap v324: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:43:54.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:53 vm06 bash[28114]: audit 2026-04-15T13:43:53.489193+0000 mon.vm06 (mon.0) 1133 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:43:54.088 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:43:54.088 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:43:54.088 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:43:54.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:53 vm09 bash[34466]: cluster 2026-04-15T13:43:52.538182+0000 mgr.vm06.qbbldl (mgr.14229) 596 : cluster [DBG] pgmap v324: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:43:54.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:53 vm09 bash[34466]: audit 2026-04-15T13:43:53.489193+0000 mon.vm06 (mon.0) 1133 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:43:55.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:54 vm06 bash[28114]: audit 2026-04-15T13:43:53.641653+0000 mgr.vm06.qbbldl (mgr.14229) 597 : audit [DBG] from='client.15830 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:55.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:54 vm06 bash[28114]: audit 2026-04-15T13:43:53.852015+0000 mgr.vm06.qbbldl (mgr.14229) 598 : audit [DBG] from='client.15834 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:55.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:54 vm06 bash[28114]: audit 2026-04-15T13:43:54.085210+0000 mon.vm06 (mon.0) 1134 : audit [DBG] from='client.? 192.168.123.106:0/3671618635' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:43:55.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:54 vm09 bash[34466]: audit 2026-04-15T13:43:53.641653+0000 mgr.vm06.qbbldl (mgr.14229) 597 : audit [DBG] from='client.15830 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:55.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:54 vm09 bash[34466]: audit 2026-04-15T13:43:53.852015+0000 mgr.vm06.qbbldl (mgr.14229) 598 : audit [DBG] from='client.15834 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:43:55.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:54 vm09 bash[34466]: audit 2026-04-15T13:43:54.085210+0000 mon.vm06 (mon.0) 1134 : audit [DBG] from='client.? 192.168.123.106:0/3671618635' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
192.168.123.106:0/3671618635' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:43:56.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:55 vm09 bash[34466]: cluster 2026-04-15T13:43:54.538527+0000 mgr.vm06.qbbldl (mgr.14229) 599 : cluster [DBG] pgmap v325: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:56.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:55 vm09 bash[34466]: cluster 2026-04-15T13:43:54.538527+0000 mgr.vm06.qbbldl (mgr.14229) 599 : cluster [DBG] pgmap v325: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:56.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:55 vm06 bash[28114]: cluster 2026-04-15T13:43:54.538527+0000 mgr.vm06.qbbldl (mgr.14229) 599 : cluster [DBG] pgmap v325: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:56.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:55 vm06 bash[28114]: cluster 2026-04-15T13:43:54.538527+0000 mgr.vm06.qbbldl (mgr.14229) 599 : cluster [DBG] pgmap v325: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:58.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:57 vm09 bash[34466]: cluster 2026-04-15T13:43:56.538995+0000 mgr.vm06.qbbldl (mgr.14229) 600 : cluster [DBG] pgmap v326: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:58.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:57 vm09 bash[34466]: cluster 2026-04-15T13:43:56.538995+0000 mgr.vm06.qbbldl (mgr.14229) 600 : cluster [DBG] pgmap v326: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:58.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:57 vm06 bash[28114]: cluster 2026-04-15T13:43:56.538995+0000 mgr.vm06.qbbldl (mgr.14229) 600 : cluster [DBG] pgmap v326: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:58.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:57 vm06 bash[28114]: cluster 2026-04-15T13:43:56.538995+0000 mgr.vm06.qbbldl (mgr.14229) 600 : cluster [DBG] pgmap v326: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:43:59.301 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop 2026-04-15T13:43:59.493 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:43:59.493 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (7m) 2m ago 8m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:43:59.493 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (2m) 2m ago 8m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:43:59.493 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 115s ago 8m - - 2026-04-15T13:43:59.493 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (8m) 115s ago 8m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:43:59.730 
2026-04-15T13:43:59.730 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:43:59.730 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:44:00.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:59 vm09 bash[34466]: cluster 2026-04-15T13:43:58.539502+0000 mgr.vm06.qbbldl (mgr.14229) 601 : cluster [DBG] pgmap v327: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:00.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:59 vm09 bash[34466]: audit 2026-04-15T13:43:59.278292+0000 mgr.vm06.qbbldl (mgr.14229) 602 : audit [DBG] from='client.15842 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:00.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:43:59 vm09 bash[34466]: audit 2026-04-15T13:43:59.726825+0000 mon.vm06 (mon.0) 1135 : audit [DBG] from='client.? 192.168.123.106:0/477573650' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:44:00.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:59 vm06 bash[28114]: cluster 2026-04-15T13:43:58.539502+0000 mgr.vm06.qbbldl (mgr.14229) 601 : cluster [DBG] pgmap v327: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:00.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:59 vm06 bash[28114]: audit 2026-04-15T13:43:59.278292+0000 mgr.vm06.qbbldl (mgr.14229) 602 : audit [DBG] from='client.15842 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:00.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:43:59 vm06 bash[28114]: audit 2026-04-15T13:43:59.726825+0000 mon.vm06 (mon.0) 1135 : audit [DBG] from='client.? 192.168.123.106:0/477573650' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:44:01.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:00 vm09 bash[34466]: audit 2026-04-15T13:43:59.485585+0000 mgr.vm06.qbbldl (mgr.14229) 603 : audit [DBG] from='client.15846 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:01.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:00 vm06 bash[28114]: audit 2026-04-15T13:43:59.485585+0000 mgr.vm06.qbbldl (mgr.14229) 603 : audit [DBG] from='client.15846 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:02.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:01 vm09 bash[34466]: cluster 2026-04-15T13:44:00.539980+0000 mgr.vm06.qbbldl (mgr.14229) 604 : cluster [DBG] pgmap v328: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:02.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:01 vm06 bash[28114]: cluster 2026-04-15T13:44:00.539980+0000 mgr.vm06.qbbldl (mgr.14229) 604 : cluster [DBG] pgmap v328: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:04.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:03 vm09 bash[34466]: cluster 2026-04-15T13:44:02.540467+0000 mgr.vm06.qbbldl (mgr.14229) 605 : cluster [DBG] pgmap v329: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:04.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:03 vm06 bash[28114]: cluster 2026-04-15T13:44:02.540467+0000 mgr.vm06.qbbldl (mgr.14229) 605 : cluster [DBG] pgmap v329: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:04.935 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:44:05.107 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:44:05.107 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (7m) 2m ago 8m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:44:05.107 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (2m) 2m ago 8m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:44:05.107 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 2m ago 8m - -
2026-04-15T13:44:05.107 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (8m) 2m ago 8m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:44:05.108 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:04 vm09 bash[34466]: audit 2026-04-15T13:44:04.548245+0000 mon.vm06 (mon.0) 1136 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:44:05.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:04 vm06 bash[28114]: audit 2026-04-15T13:44:04.548245+0000 mon.vm06 (mon.0) 1136 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:44:05.341 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:44:05.341 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:44:05.341 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:44:06.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:05 vm09 bash[34466]: cluster 2026-04-15T13:44:04.540934+0000 mgr.vm06.qbbldl (mgr.14229) 606 : cluster [DBG] pgmap v330: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:06.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:05 vm09 bash[34466]: audit 2026-04-15T13:44:04.844913+0000 mon.vm06 (mon.0) 1137 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:44:06.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:05 vm09 bash[34466]: audit 2026-04-15T13:44:04.849512+0000 mon.vm06 (mon.0) 1138 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:44:06.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:05 vm09 bash[34466]: audit 2026-04-15T13:44:04.915571+0000 mgr.vm06.qbbldl (mgr.14229) 607 : audit [DBG] from='client.15854 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:06.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:05 vm09 bash[34466]: audit 2026-04-15T13:44:05.101171+0000 mgr.vm06.qbbldl (mgr.14229) 608 : audit [DBG] from='client.15858 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:06.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:05 vm09 bash[34466]: audit 2026-04-15T13:44:05.180606+0000 mon.vm06 (mon.0) 1139 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:44:06.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:05 vm09 bash[34466]: audit 2026-04-15T13:44:05.181127+0000 mon.vm06 (mon.0) 1140 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:44:06.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:05 vm09 bash[34466]: cluster 2026-04-15T13:44:05.182116+0000 mgr.vm06.qbbldl (mgr.14229) 609 : cluster [DBG] pgmap v331: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:44:06.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:05 vm09 bash[34466]: cluster 2026-04-15T13:44:05.182217+0000 mgr.vm06.qbbldl (mgr.14229) 610 : cluster [DBG] pgmap v332: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:44:06.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:05 vm09 bash[34466]: audit 2026-04-15T13:44:05.186236+0000 mon.vm06 (mon.0) 1141 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:44:06.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:05 vm09 bash[34466]: audit 2026-04-15T13:44:05.187928+0000 mon.vm06 (mon.0) 1142 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:44:06.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:05 vm09 bash[34466]: audit 2026-04-15T13:44:05.337620+0000 mon.vm06 (mon.0) 1143 : audit [DBG] from='client.? 192.168.123.106:0/1781195367' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:44:06.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:05 vm06 bash[28114]: cluster 2026-04-15T13:44:04.540934+0000 mgr.vm06.qbbldl (mgr.14229) 606 : cluster [DBG] pgmap v330: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:06.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:05 vm06 bash[28114]: audit 2026-04-15T13:44:04.844913+0000 mon.vm06 (mon.0) 1137 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:44:06.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:05 vm06 bash[28114]: audit 2026-04-15T13:44:04.849512+0000 mon.vm06 (mon.0) 1138 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:44:06.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:05 vm06 bash[28114]: audit 2026-04-15T13:44:04.915571+0000 mgr.vm06.qbbldl (mgr.14229) 607 : audit [DBG] from='client.15854 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:06.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:05 vm06 bash[28114]: audit 2026-04-15T13:44:05.101171+0000 mgr.vm06.qbbldl (mgr.14229) 608 : audit [DBG] from='client.15858 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:06.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:05 vm06 bash[28114]: audit 2026-04-15T13:44:05.180606+0000 mon.vm06 (mon.0) 1139 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:44:06.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:05 vm06 bash[28114]: audit 2026-04-15T13:44:05.181127+0000 mon.vm06 (mon.0) 1140 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:44:06.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:05 vm06 bash[28114]: cluster 2026-04-15T13:44:05.182116+0000 mgr.vm06.qbbldl (mgr.14229) 609 : cluster [DBG] pgmap v331: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:44:06.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:05 vm06 bash[28114]: cluster 2026-04-15T13:44:05.182217+0000 mgr.vm06.qbbldl (mgr.14229) 610 : cluster [DBG] pgmap v332: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:44:06.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:05 vm06 bash[28114]: audit 2026-04-15T13:44:05.186236+0000 mon.vm06 (mon.0) 1141 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:44:06.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:05 vm06 bash[28114]: audit 2026-04-15T13:44:05.187928+0000 mon.vm06 (mon.0) 1142 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:44:06.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:05 vm06 bash[28114]: audit 2026-04-15T13:44:05.337620+0000 mon.vm06 (mon.0) 1143 : audit [DBG] from='client.? 192.168.123.106:0/1781195367' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:44:08.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:08 vm06 bash[28114]: cluster 2026-04-15T13:44:07.182554+0000 mgr.vm06.qbbldl (mgr.14229) 611 : cluster [DBG] pgmap v333: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:44:08.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:08 vm09 bash[34466]: cluster 2026-04-15T13:44:07.182554+0000 mgr.vm06.qbbldl (mgr.14229) 611 : cluster [DBG] pgmap v333: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:44:09.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:09 vm06 bash[28114]: audit 2026-04-15T13:44:08.489788+0000 mon.vm06 (mon.0) 1144 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:44:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:09 vm09 bash[34466]: audit 2026-04-15T13:44:08.489788+0000 mon.vm06 (mon.0) 1144 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:44:10.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:10 vm06 bash[28114]: cluster 2026-04-15T13:44:09.183039+0000 mgr.vm06.qbbldl (mgr.14229) 612 : cluster [DBG] pgmap v334: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 473 B/s wr, 0 op/s
2026-04-15T13:44:10.546 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:44:10.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:10 vm09 bash[34466]: cluster 2026-04-15T13:44:09.183039+0000 mgr.vm06.qbbldl (mgr.14229) 612 : cluster [DBG] pgmap v334: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 473 B/s wr, 0 op/s
2026-04-15T13:44:10.720 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:44:10.720 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (7m) 2m ago 8m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:44:10.720 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (2m) 2m ago 8m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:44:10.720 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 2m ago 8m - -
2026-04-15T13:44:10.720 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (8m) 2m ago 8m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:44:10.951 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:44:10.951 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:44:10.951 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:44:11.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:11 vm06 bash[28114]: audit 2026-04-15T13:44:10.948245+0000 mon.vm06 (mon.0) 1145 : audit [DBG] from='client.? 192.168.123.106:0/4060239517' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:44:11.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:11 vm09 bash[34466]: audit 2026-04-15T13:44:10.948245+0000 mon.vm06 (mon.0) 1145 : audit [DBG] from='client.? 192.168.123.106:0/4060239517' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:44:12.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:12 vm06 bash[28114]: audit 2026-04-15T13:44:10.523252+0000 mgr.vm06.qbbldl (mgr.14229) 613 : audit [DBG] from='client.15866 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:12.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:12 vm06 bash[28114]: audit 2026-04-15T13:44:10.714286+0000 mgr.vm06.qbbldl (mgr.14229) 614 : audit [DBG] from='client.15870 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:12.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:12 vm06 bash[28114]: cluster 2026-04-15T13:44:11.183584+0000 mgr.vm06.qbbldl (mgr.14229) 615 : cluster [DBG] pgmap v335: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 473 B/s wr, 0 op/s
2026-04-15T13:44:12.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:12 vm09 bash[34466]: audit 2026-04-15T13:44:10.523252+0000 mgr.vm06.qbbldl (mgr.14229) 613 : audit [DBG] from='client.15866 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:12.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:12 vm09 bash[34466]: audit 2026-04-15T13:44:10.714286+0000 mgr.vm06.qbbldl (mgr.14229) 614 : audit [DBG] from='client.15870 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:12.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:12 vm09 bash[34466]: cluster 2026-04-15T13:44:11.183584+0000 mgr.vm06.qbbldl (mgr.14229) 615 : cluster [DBG] pgmap v335: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 473 B/s wr, 0 op/s
2026-04-15T13:44:14.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:14 vm09 bash[34466]: cluster 2026-04-15T13:44:13.184062+0000 mgr.vm06.qbbldl (mgr.14229) 616 : cluster [DBG] pgmap v336: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 473 B/s wr, 0 op/s
2026-04-15T13:44:14.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:14 vm06 bash[28114]: cluster 2026-04-15T13:44:13.184062+0000 mgr.vm06.qbbldl (mgr.14229) 616 : cluster [DBG] pgmap v336: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 473 B/s wr, 0 op/s
2026-04-15T13:44:16.146 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:44:16.324 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:44:16.325 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (7m) 2m ago 8m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:44:16.325 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (2m) 2m ago 8m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:44:16.325 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 2m ago 8m - -
2026-04-15T13:44:16.325 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (8m) 2m ago 8m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:44:16.544 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:44:16.545 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:44:16.545 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:44:16.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:16 vm09 bash[34466]: cluster 2026-04-15T13:44:15.184450+0000 mgr.vm06.qbbldl (mgr.14229) 617 : cluster [DBG] pgmap v337: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s
2026-04-15T13:44:16.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:16 vm06 bash[28114]: cluster 2026-04-15T13:44:15.184450+0000 mgr.vm06.qbbldl (mgr.14229) 617 : cluster [DBG] pgmap v337: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s
2026-04-15T13:44:17.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:17 vm09 bash[34466]: audit 2026-04-15T13:44:16.126835+0000 mgr.vm06.qbbldl (mgr.14229) 618 : audit [DBG] from='client.15878 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:17.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:17 vm09 bash[34466]: audit 2026-04-15T13:44:16.318461+0000 mgr.vm06.qbbldl (mgr.14229) 619 : audit [DBG] from='client.15882 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:17.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:17 vm09 bash[34466]: audit 2026-04-15T13:44:16.541324+0000 mon.vm06 (mon.0) 1146 : audit [DBG] from='client.? 192.168.123.106:0/2298284576' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:44:17.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:17 vm06 bash[28114]: audit 2026-04-15T13:44:16.126835+0000 mgr.vm06.qbbldl (mgr.14229) 618 : audit [DBG] from='client.15878 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:17.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:17 vm06 bash[28114]: audit 2026-04-15T13:44:16.318461+0000 mgr.vm06.qbbldl (mgr.14229) 619 : audit [DBG] from='client.15882 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:17.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:17 vm06 bash[28114]: audit 2026-04-15T13:44:16.541324+0000 mon.vm06 (mon.0) 1146 : audit [DBG] from='client.? 192.168.123.106:0/2298284576' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:44:18.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:18 vm09 bash[34466]: cluster 2026-04-15T13:44:17.185065+0000 mgr.vm06.qbbldl (mgr.14229) 620 : cluster [DBG] pgmap v338: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:44:18.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:18 vm06 bash[28114]: cluster 2026-04-15T13:44:17.185065+0000 mgr.vm06.qbbldl (mgr.14229) 620 : cluster [DBG] pgmap v338: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:44:20.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:20 vm09 bash[34466]: cluster 2026-04-15T13:44:19.185521+0000 mgr.vm06.qbbldl (mgr.14229) 621 : cluster [DBG] pgmap v339: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:44:20.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:20 vm06 bash[28114]: cluster 2026-04-15T13:44:19.185521+0000 mgr.vm06.qbbldl (mgr.14229) 621 : cluster [DBG] pgmap v339: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:44:21.747 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:44:21.924 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:44:21.924 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (8m) 2m ago 8m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:44:21.924 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (2m) 2m ago 8m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:44:21.924 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 2m ago 8m - -
2026-04-15T13:44:21.924 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (8m) 2m ago 8m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:44:22.142 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:44:22.142 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:44:22.142 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:44:22.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:22 vm09 bash[34466]: cluster 2026-04-15T13:44:21.185940+0000 mgr.vm06.qbbldl (mgr.14229) 622 : cluster [DBG] pgmap v340: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:44:22.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:22 vm09 bash[34466]: audit 2026-04-15T13:44:22.138391+0000 mon.vm06 (mon.0) 1147 : audit [DBG] from='client.? 192.168.123.106:0/1766823571' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:44:22.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:22 vm06 bash[28114]: cluster 2026-04-15T13:44:21.185940+0000 mgr.vm06.qbbldl (mgr.14229) 622 : cluster [DBG] pgmap v340: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:44:22.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:22 vm06 bash[28114]: audit 2026-04-15T13:44:22.138391+0000 mon.vm06 (mon.0) 1147 : audit [DBG] from='client.? 192.168.123.106:0/1766823571' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:44:23.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:23 vm09 bash[34466]: audit 2026-04-15T13:44:21.728141+0000 mgr.vm06.qbbldl (mgr.14229) 623 : audit [DBG] from='client.15890 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:23.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:23 vm09 bash[34466]: audit 2026-04-15T13:44:21.918018+0000 mgr.vm06.qbbldl (mgr.14229) 624 : audit [DBG] from='client.15894 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:23.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:23 vm06 bash[28114]: audit 2026-04-15T13:44:21.728141+0000 mgr.vm06.qbbldl (mgr.14229) 623 : audit [DBG] from='client.15890 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:23.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:23 vm06 bash[28114]: audit 2026-04-15T13:44:21.918018+0000 mgr.vm06.qbbldl (mgr.14229) 624 : audit [DBG] from='client.15894 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:24.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:24 vm09 bash[34466]: cluster 2026-04-15T13:44:23.186372+0000 mgr.vm06.qbbldl (mgr.14229) 625 : cluster [DBG] pgmap v341: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:44:24.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:24 vm09 bash[34466]: audit 2026-04-15T13:44:23.490155+0000 mon.vm06 (mon.0) 1148 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:44:24.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:24 vm06 bash[28114]: cluster 2026-04-15T13:44:23.186372+0000 mgr.vm06.qbbldl (mgr.14229) 625 : cluster [DBG] pgmap v341: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:44:24.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:24 vm06 bash[28114]: audit 2026-04-15T13:44:23.490155+0000 mon.vm06 (mon.0) 1148 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:44:26.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:26 vm09 bash[34466]: cluster 2026-04-15T13:44:25.186763+0000 mgr.vm06.qbbldl (mgr.14229) 626 : cluster [DBG] pgmap v342: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:26.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:26 vm06 bash[28114]: cluster 2026-04-15T13:44:25.186763+0000 mgr.vm06.qbbldl (mgr.14229) 626 : cluster [DBG] pgmap v342: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:27.339 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:44:27.512 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:44:27.513 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (8m) 2m ago 8m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:44:27.513 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (2m) 2m ago 9m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:44:27.513 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 2m ago 9m - -
2026-04-15T13:44:27.513 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (8m) 2m ago 8m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:44:27.735 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:44:27.735 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:44:27.735 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:44:28.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:28 vm09 bash[34466]: cluster 2026-04-15T13:44:27.187133+0000 mgr.vm06.qbbldl (mgr.14229) 627 : cluster [DBG] pgmap v343: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:28.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:28 vm09 bash[34466]: audit 2026-04-15T13:44:27.320219+0000 mgr.vm06.qbbldl (mgr.14229) 628 : audit [DBG] from='client.15902 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:28.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:28 vm09 bash[34466]: audit 2026-04-15T13:44:27.731294+0000 mon.vm06 (mon.0) 1149 : audit [DBG] from='client.? 192.168.123.106:0/2622741371' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:44:28.631 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:28 vm06 bash[28114]: cluster 2026-04-15T13:44:27.187133+0000 mgr.vm06.qbbldl (mgr.14229) 627 : cluster [DBG] pgmap v343: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:28.631 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:28 vm06 bash[28114]: audit 2026-04-15T13:44:27.320219+0000 mgr.vm06.qbbldl (mgr.14229) 628 : audit [DBG] from='client.15902 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:28.631 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:28 vm06 bash[28114]: audit 2026-04-15T13:44:27.731294+0000 mon.vm06 (mon.0) 1149 : audit [DBG] from='client.? 192.168.123.106:0/2622741371' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:44:29.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:29 vm09 bash[34466]: audit 2026-04-15T13:44:27.506308+0000 mgr.vm06.qbbldl (mgr.14229) 629 : audit [DBG] from='client.15906 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:29.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:29 vm06 bash[28114]: audit 2026-04-15T13:44:27.506308+0000 mgr.vm06.qbbldl (mgr.14229) 629 : audit [DBG] from='client.15906 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:30.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:30 vm09 bash[34466]: cluster 2026-04-15T13:44:29.187608+0000 mgr.vm06.qbbldl (mgr.14229) 630 : cluster [DBG] pgmap v344: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:30.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:30 vm06 bash[28114]: cluster 2026-04-15T13:44:29.187608+0000 mgr.vm06.qbbldl (mgr.14229) 630 : cluster [DBG] pgmap v344: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:32.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:32 vm09 bash[34466]: cluster 2026-04-15T13:44:31.188007+0000 mgr.vm06.qbbldl (mgr.14229) 631 : cluster [DBG] pgmap v345: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:32.766
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:32 vm06 bash[28114]: cluster 2026-04-15T13:44:31.188007+0000 mgr.vm06.qbbldl (mgr.14229) 631 : cluster [DBG] pgmap v345: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:44:32.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:32 vm06 bash[28114]: cluster 2026-04-15T13:44:31.188007+0000 mgr.vm06.qbbldl (mgr.14229) 631 : cluster [DBG] pgmap v345: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:44:32.943 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop 2026-04-15T13:44:33.132 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:44:33.132 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (8m) 2m ago 9m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:44:33.132 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (2m) 2m ago 9m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:44:33.132 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 2m ago 9m - - 2026-04-15T13:44:33.132 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (9m) 2m ago 9m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:44:33.361 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:44:33.361 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:44:33.361 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state 2026-04-15T13:44:34.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:34 vm09 bash[34466]: audit 2026-04-15T13:44:32.921781+0000 mgr.vm06.qbbldl (mgr.14229) 632 : audit [DBG] from='client.15914 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:44:34.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:34 vm09 bash[34466]: audit 2026-04-15T13:44:32.921781+0000 mgr.vm06.qbbldl (mgr.14229) 632 : audit [DBG] from='client.15914 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:44:34.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:34 vm09 bash[34466]: audit 2026-04-15T13:44:33.125223+0000 mgr.vm06.qbbldl (mgr.14229) 633 : audit [DBG] from='client.15918 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:44:34.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:34 vm09 bash[34466]: audit 2026-04-15T13:44:33.125223+0000 mgr.vm06.qbbldl (mgr.14229) 633 : audit [DBG] from='client.15918 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:44:34.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:34 vm09 bash[34466]: cluster 2026-04-15T13:44:33.188410+0000 mgr.vm06.qbbldl (mgr.14229) 634 : cluster [DBG] pgmap v346: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:44:34.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:34 vm09 bash[34466]: cluster 2026-04-15T13:44:33.188410+0000 mgr.vm06.qbbldl (mgr.14229) 634 : 
cluster [DBG] pgmap v346: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:44:34.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:34 vm09 bash[34466]: audit 2026-04-15T13:44:33.357206+0000 mon.vm06 (mon.0) 1150 : audit [DBG] from='client.? 192.168.123.106:0/3182481516' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:44:34.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:34 vm09 bash[34466]: audit 2026-04-15T13:44:33.357206+0000 mon.vm06 (mon.0) 1150 : audit [DBG] from='client.? 192.168.123.106:0/3182481516' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:44:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:34 vm06 bash[28114]: audit 2026-04-15T13:44:32.921781+0000 mgr.vm06.qbbldl (mgr.14229) 632 : audit [DBG] from='client.15914 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:44:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:34 vm06 bash[28114]: audit 2026-04-15T13:44:32.921781+0000 mgr.vm06.qbbldl (mgr.14229) 632 : audit [DBG] from='client.15914 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:44:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:34 vm06 bash[28114]: audit 2026-04-15T13:44:33.125223+0000 mgr.vm06.qbbldl (mgr.14229) 633 : audit [DBG] from='client.15918 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:44:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:34 vm06 bash[28114]: audit 2026-04-15T13:44:33.125223+0000 mgr.vm06.qbbldl (mgr.14229) 633 : audit [DBG] from='client.15918 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:44:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:34 vm06 bash[28114]: cluster 2026-04-15T13:44:33.188410+0000 mgr.vm06.qbbldl (mgr.14229) 634 : cluster [DBG] pgmap v346: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:44:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:34 vm06 bash[28114]: cluster 2026-04-15T13:44:33.188410+0000 mgr.vm06.qbbldl (mgr.14229) 634 : cluster [DBG] pgmap v346: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:44:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:34 vm06 bash[28114]: audit 2026-04-15T13:44:33.357206+0000 mon.vm06 (mon.0) 1150 : audit [DBG] from='client.? 192.168.123.106:0/3182481516' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:44:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:34 vm06 bash[28114]: audit 2026-04-15T13:44:33.357206+0000 mon.vm06 (mon.0) 1150 : audit [DBG] from='client.? 
2026-04-15T13:44:36.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:36 vm09 bash[34466]: cluster 2026-04-15T13:44:35.188843+0000 mgr.vm06.qbbldl (mgr.14229) 635 : cluster [DBG] pgmap v347: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:36.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:36 vm06 bash[28114]: cluster 2026-04-15T13:44:35.188843+0000 mgr.vm06.qbbldl (mgr.14229) 635 : cluster [DBG] pgmap v347: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:38.564 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:44:38.748 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:44:38.748 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (8m) 2m ago 9m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:44:38.748 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (3m) 2m ago 9m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:44:38.748 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 2m ago 9m - -
2026-04-15T13:44:38.748 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (9m) 2m ago 9m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:44:38.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:38 vm06 bash[28114]: cluster 2026-04-15T13:44:37.189248+0000 mgr.vm06.qbbldl (mgr.14229) 636 : cluster [DBG] pgmap v348: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:44:38.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:38 vm09 bash[34466]: cluster 2026-04-15T13:44:37.189248+0000 mgr.vm06.qbbldl (mgr.14229) 636 : cluster [DBG] pgmap v348: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:44:38.986 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:44:38.987 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:44:38.987 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:44:39.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:39 vm06 bash[28114]: audit 2026-04-15T13:44:38.490355+0000 mon.vm06 (mon.0) 1151 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:44:39.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:39 vm06 bash[28114]: audit 2026-04-15T13:44:38.983059+0000 mon.vm06 (mon.0) 1152 : audit [DBG] from='client.? 192.168.123.106:0/2335260805' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:44:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:39 vm09 bash[34466]: audit 2026-04-15T13:44:38.490355+0000 mon.vm06 (mon.0) 1151 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:44:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:39 vm09 bash[34466]: audit 2026-04-15T13:44:38.983059+0000 mon.vm06 (mon.0) 1152 : audit [DBG] from='client.? 192.168.123.106:0/2335260805' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:44:40.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:40 vm06 bash[28114]: audit 2026-04-15T13:44:38.543780+0000 mgr.vm06.qbbldl (mgr.14229) 637 : audit [DBG] from='client.15926 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:40.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:40 vm06 bash[28114]: audit 2026-04-15T13:44:38.741932+0000 mgr.vm06.qbbldl (mgr.14229) 638 : audit [DBG] from='client.15930 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:40.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:40 vm06 bash[28114]: cluster 2026-04-15T13:44:39.189712+0000 mgr.vm06.qbbldl (mgr.14229) 639 : cluster [DBG] pgmap v349: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:44:40.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:40 vm09 bash[34466]: audit 2026-04-15T13:44:38.543780+0000 mgr.vm06.qbbldl (mgr.14229) 637 : audit [DBG] from='client.15926 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:40.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:40 vm09 bash[34466]: audit 2026-04-15T13:44:38.741932+0000 mgr.vm06.qbbldl (mgr.14229) 638 : audit [DBG] from='client.15930 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:40.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:40 vm09 bash[34466]: cluster 2026-04-15T13:44:39.189712+0000 mgr.vm06.qbbldl (mgr.14229) 639 : cluster [DBG] pgmap v349: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:44:42.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:42 vm06 bash[28114]: cluster 2026-04-15T13:44:41.190331+0000 mgr.vm06.qbbldl (mgr.14229) 640 : cluster [DBG] pgmap v350: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:44:42.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:42 vm09 bash[34466]: cluster 2026-04-15T13:44:41.190331+0000 mgr.vm06.qbbldl (mgr.14229) 640 : cluster [DBG] pgmap v350: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:44:44.203 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:44:44.379 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:44:44.379 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (8m) 3m ago 9m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:44:44.379 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (3m) 3m ago 9m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:44:44.379 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 2m ago 9m - -
2026-04-15T13:44:44.379 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (9m) 2m ago 9m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:44:44.616 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:44:44.616 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:44:44.616 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:44:44.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:44 vm06 bash[28114]: cluster 2026-04-15T13:44:43.190778+0000 mgr.vm06.qbbldl (mgr.14229) 641 : cluster [DBG] pgmap v351: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:44:44.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:44 vm09 bash[34466]: cluster 2026-04-15T13:44:43.190778+0000 mgr.vm06.qbbldl (mgr.14229) 641 : cluster [DBG] pgmap v351: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:44:45.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:45 vm06 bash[28114]: audit 2026-04-15T13:44:44.178681+0000 mgr.vm06.qbbldl (mgr.14229) 642 : audit [DBG] from='client.15938 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:45.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:45 vm06 bash[28114]: audit 2026-04-15T13:44:44.372959+0000 mgr.vm06.qbbldl (mgr.14229) 643 : audit [DBG] from='client.15942 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:45.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:45 vm06 bash[28114]: audit 2026-04-15T13:44:44.612372+0000 mon.vm06 (mon.0) 1153 : audit [DBG] from='client.? 192.168.123.106:0/2983633886' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:44:45.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:45 vm09 bash[34466]: audit 2026-04-15T13:44:44.178681+0000 mgr.vm06.qbbldl (mgr.14229) 642 : audit [DBG] from='client.15938 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:45.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:45 vm09 bash[34466]: audit 2026-04-15T13:44:44.372959+0000 mgr.vm06.qbbldl (mgr.14229) 643 : audit [DBG] from='client.15942 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:45.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:45 vm09 bash[34466]: audit 2026-04-15T13:44:44.612372+0000 mon.vm06 (mon.0) 1153 : audit [DBG] from='client.? 192.168.123.106:0/2983633886' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:44:46.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:46 vm06 bash[28114]: cluster 2026-04-15T13:44:45.191324+0000 mgr.vm06.qbbldl (mgr.14229) 644 : cluster [DBG] pgmap v352: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:44:46.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:46 vm09 bash[34466]: cluster 2026-04-15T13:44:45.191324+0000 mgr.vm06.qbbldl (mgr.14229) 644 : cluster [DBG] pgmap v352: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:44:48.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:48 vm06 bash[28114]: cluster 2026-04-15T13:44:47.191730+0000 mgr.vm06.qbbldl (mgr.14229) 645 : cluster [DBG] pgmap v353: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:44:48.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:48 vm09 bash[34466]: cluster 2026-04-15T13:44:47.191730+0000 mgr.vm06.qbbldl (mgr.14229) 645 : cluster [DBG] pgmap v353: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:44:49.832 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:44:50.014 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:44:50.014 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (8m) 3m ago 9m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:44:50.014 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (3m) 3m ago 9m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:44:50.014 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 2m ago 9m - -
2026-04-15T13:44:50.014 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (9m) 2m ago 9m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:44:50.248 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:44:50.248 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:44:50.248 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:44:50.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:50 vm06 bash[28114]: cluster 2026-04-15T13:44:49.192145+0000 mgr.vm06.qbbldl (mgr.14229) 646 : cluster [DBG] pgmap v354: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:44:50.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:50 vm06 bash[28114]: audit 2026-04-15T13:44:50.244760+0000 mon.vm06 (mon.0) 1154 : audit [DBG] from='client.? 192.168.123.106:0/74338148' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:44:50.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:50 vm09 bash[34466]: cluster 2026-04-15T13:44:49.192145+0000 mgr.vm06.qbbldl (mgr.14229) 646 : cluster [DBG] pgmap v354: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:44:50.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:50 vm09 bash[34466]: audit 2026-04-15T13:44:50.244760+0000 mon.vm06 (mon.0) 1154 : audit [DBG] from='client.? 192.168.123.106:0/74338148' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:44:51.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:51 vm06 bash[28114]: audit 2026-04-15T13:44:49.811810+0000 mgr.vm06.qbbldl (mgr.14229) 647 : audit [DBG] from='client.15950 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:51.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:51 vm06 bash[28114]: audit 2026-04-15T13:44:50.007401+0000 mgr.vm06.qbbldl (mgr.14229) 648 : audit [DBG] from='client.15954 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:51.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:51 vm09 bash[34466]: audit 2026-04-15T13:44:49.811810+0000 mgr.vm06.qbbldl (mgr.14229) 647 : audit [DBG] from='client.15950 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:51.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:51 vm09 bash[34466]: audit 2026-04-15T13:44:50.007401+0000 mgr.vm06.qbbldl (mgr.14229) 648 : audit [DBG] from='client.15954 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:52.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:52 vm06 bash[28114]: cluster 2026-04-15T13:44:51.192544+0000 mgr.vm06.qbbldl (mgr.14229) 649 : cluster [DBG] pgmap v355: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:44:52.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:52 vm09 bash[34466]: cluster 2026-04-15T13:44:51.192544+0000 mgr.vm06.qbbldl (mgr.14229) 649 : cluster [DBG] pgmap v355: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:44:54.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:54 vm06 bash[28114]: cluster 2026-04-15T13:44:53.193031+0000 mgr.vm06.qbbldl (mgr.14229) 650 : cluster [DBG] pgmap v356: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:44:54.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:54 vm06 bash[28114]: audit 2026-04-15T13:44:53.490673+0000 mon.vm06 (mon.0) 1155 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:44:54.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:54 vm09 bash[34466]: cluster 2026-04-15T13:44:53.193031+0000 mgr.vm06.qbbldl (mgr.14229) 650 : cluster [DBG] pgmap v356: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:44:54.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:54 vm09 bash[34466]: audit 2026-04-15T13:44:53.490673+0000 mon.vm06 (mon.0) 1155 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:44:55.467 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:44:55.651 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:44:55.651 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (8m) 3m ago 9m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:44:55.651 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (3m) 3m ago 9m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:44:55.651 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 2m ago 9m - -
2026-04-15T13:44:55.651 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (9m) 2m ago 9m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:44:55.883 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:44:55.884 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:44:55.884 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:44:56.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:56 vm06 bash[28114]: cluster 2026-04-15T13:44:55.193409+0000 mgr.vm06.qbbldl (mgr.14229) 651 : cluster [DBG] pgmap v357: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:56.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:56 vm06 bash[28114]: audit 2026-04-15T13:44:55.879945+0000 mon.vm06 (mon.0) 1156 : audit [DBG] from='client.? 192.168.123.106:0/46467503' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:44:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:56 vm09 bash[34466]: cluster 2026-04-15T13:44:55.193409+0000 mgr.vm06.qbbldl (mgr.14229) 651 : cluster [DBG] pgmap v357: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:56 vm09 bash[34466]: audit 2026-04-15T13:44:55.879945+0000 mon.vm06 (mon.0) 1156 : audit [DBG] from='client.? 192.168.123.106:0/46467503' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:44:57.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:57 vm06 bash[28114]: audit 2026-04-15T13:44:55.447280+0000 mgr.vm06.qbbldl (mgr.14229) 652 : audit [DBG] from='client.15962 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:57.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:57 vm06 bash[28114]: audit 2026-04-15T13:44:55.645324+0000 mgr.vm06.qbbldl (mgr.14229) 653 : audit [DBG] from='client.15966 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:57.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:57 vm09 bash[34466]: audit 2026-04-15T13:44:55.447280+0000 mgr.vm06.qbbldl (mgr.14229) 652 : audit [DBG] from='client.15962 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:57 vm09 bash[34466]: audit 2026-04-15T13:44:55.645324+0000 mgr.vm06.qbbldl (mgr.14229) 653 : audit [DBG] from='client.15966 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:44:58.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:58 vm06 bash[28114]: cluster 2026-04-15T13:44:57.193775+0000 mgr.vm06.qbbldl (mgr.14229) 654 : cluster [DBG] pgmap v358: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:58.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:58 vm09 bash[34466]: cluster 2026-04-15T13:44:57.193775+0000 mgr.vm06.qbbldl (mgr.14229) 654 : cluster [DBG] pgmap v358: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:59.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:44:59 vm06 bash[28114]: cluster 2026-04-15T13:44:59.194166+0000 mgr.vm06.qbbldl (mgr.14229) 655 : cluster [DBG] pgmap v359: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:44:59.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:44:59 vm09 bash[34466]: cluster 2026-04-15T13:44:59.194166+0000 mgr.vm06.qbbldl (mgr.14229) 655 : cluster [DBG] pgmap v359: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:45:01.084 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:45:01.264 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:45:01.264 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (8m) 3m ago 9m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:45:01.264 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (3m) 3m ago 9m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:45:01.264 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 2m ago 9m - -
2026-04-15T13:45:01.264 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (9m) 2m ago 9m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:45:01.497 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:45:01.497 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:45:01.497 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:45:02.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:02 vm06 bash[28114]: audit 2026-04-15T13:45:01.064844+0000 mgr.vm06.qbbldl (mgr.14229) 656 : audit [DBG] from='client.15974 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:02.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:02 vm06 bash[28114]: cluster 2026-04-15T13:45:01.194556+0000 mgr.vm06.qbbldl (mgr.14229) 657 : cluster [DBG] pgmap v360: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:45:02.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:02 vm06 bash[28114]: audit 2026-04-15T13:45:01.258213+0000 mgr.vm06.qbbldl (mgr.14229) 658 : audit [DBG] from='client.15978 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:02.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:02 vm06 bash[28114]: audit 2026-04-15T13:45:01.493640+0000 mon.vm06 (mon.0) 1157 : audit [DBG] from='client.? 192.168.123.106:0/753153463' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:45:02.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:02 vm09 bash[34466]: audit 2026-04-15T13:45:01.064844+0000 mgr.vm06.qbbldl (mgr.14229) 656 : audit [DBG] from='client.15974 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:02.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:02 vm09 bash[34466]: cluster 2026-04-15T13:45:01.194556+0000 mgr.vm06.qbbldl (mgr.14229) 657 : cluster [DBG] pgmap v360: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:45:02.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:02 vm09 bash[34466]: audit 2026-04-15T13:45:01.258213+0000 mgr.vm06.qbbldl (mgr.14229) 658 : audit [DBG] from='client.15978 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:02.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:02 vm09 bash[34466]: audit 2026-04-15T13:45:01.493640+0000 mon.vm06 (mon.0) 1157 : audit [DBG] from='client.? 192.168.123.106:0/753153463' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:45:04.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:04 vm06 bash[28114]: cluster 2026-04-15T13:45:03.194984+0000 mgr.vm06.qbbldl (mgr.14229) 659 : cluster [DBG] pgmap v361: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:45:04.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:04 vm09 bash[34466]: cluster 2026-04-15T13:45:03.194984+0000 mgr.vm06.qbbldl (mgr.14229) 659 : cluster [DBG] pgmap v361: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:45:05.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:05 vm09 bash[34466]: audit 2026-04-15T13:45:05.202562+0000 mon.vm06 (mon.0) 1158 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:45:05.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:05 vm06 bash[28114]: audit 2026-04-15T13:45:05.202562+0000 mon.vm06 (mon.0) 1158 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:45:06.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:06 vm09 bash[34466]: cluster 2026-04-15T13:45:05.195435+0000 mgr.vm06.qbbldl (mgr.14229) 660 : cluster [DBG] pgmap v362: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:45:06.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:06 vm09 bash[34466]: cluster 2026-04-15T13:45:05.195435+0000 mgr.vm06.qbbldl (mgr.14229) 660 : cluster [DBG] pgmap v362: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr,
0 op/s 2026-04-15T13:45:06.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:06 vm09 bash[34466]: audit 2026-04-15T13:45:05.521877+0000 mon.vm06 (mon.0) 1159 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:45:06.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:06 vm09 bash[34466]: audit 2026-04-15T13:45:05.521877+0000 mon.vm06 (mon.0) 1159 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:45:06.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:06 vm09 bash[34466]: audit 2026-04-15T13:45:05.530301+0000 mon.vm06 (mon.0) 1160 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:45:06.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:06 vm09 bash[34466]: audit 2026-04-15T13:45:05.530301+0000 mon.vm06 (mon.0) 1160 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:45:06.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:06 vm09 bash[34466]: audit 2026-04-15T13:45:05.549020+0000 mon.vm06 (mon.0) 1161 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"} : dispatch 2026-04-15T13:45:06.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:06 vm09 bash[34466]: audit 2026-04-15T13:45:05.549020+0000 mon.vm06 (mon.0) 1161 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"} : dispatch 2026-04-15T13:45:06.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:06 vm09 bash[34466]: audit 2026-04-15T13:45:05.864776+0000 mon.vm06 (mon.0) 1162 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"} : dispatch 2026-04-15T13:45:06.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:06 vm09 bash[34466]: audit 2026-04-15T13:45:05.864776+0000 mon.vm06 (mon.0) 1162 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"} : dispatch 2026-04-15T13:45:06.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:06 vm09 bash[34466]: audit 2026-04-15T13:45:05.865464+0000 mon.vm06 (mon.0) 1163 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:45:06.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:06 vm09 bash[34466]: audit 2026-04-15T13:45:05.865464+0000 mon.vm06 (mon.0) 1163 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:45:06.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:06 vm09 bash[34466]: audit 2026-04-15T13:45:05.865869+0000 mon.vm06 (mon.0) 1164 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:45:06.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:06 vm09 bash[34466]: audit 2026-04-15T13:45:05.865869+0000 mon.vm06 (mon.0) 1164 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : 
2026-04-15T13:45:06.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:06 vm09 bash[34466]: audit 2026-04-15T13:45:05.870778+0000 mon.vm06 (mon.0) 1165 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:45:06.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:06 vm09 bash[34466]: audit 2026-04-15T13:45:05.872118+0000 mon.vm06 (mon.0) 1166 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:45:06.691 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:45:06.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:06 vm06 bash[28114]: cluster 2026-04-15T13:45:05.195435+0000 mgr.vm06.qbbldl (mgr.14229) 660 : cluster [DBG] pgmap v362: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:45:06.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:06 vm06 bash[28114]: audit 2026-04-15T13:45:05.521877+0000 mon.vm06 (mon.0) 1159 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:45:06.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:06 vm06 bash[28114]: audit 2026-04-15T13:45:05.530301+0000 mon.vm06 (mon.0) 1160 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:45:06.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:06 vm06 bash[28114]: audit 2026-04-15T13:45:05.549020+0000 mon.vm06 (mon.0) 1161 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm06", "name": "osd_memory_target"} : dispatch
2026-04-15T13:45:06.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:06 vm06 bash[28114]: audit 2026-04-15T13:45:05.864776+0000 mon.vm06 (mon.0) 1162 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config rm", "who": "osd/host:vm09", "name": "osd_memory_target"} : dispatch
2026-04-15T13:45:06.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:06 vm06 bash[28114]: audit 2026-04-15T13:45:05.865464+0000 mon.vm06 (mon.0) 1163 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:45:06.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:06 vm06 bash[28114]: audit 2026-04-15T13:45:05.865869+0000 mon.vm06 (mon.0) 1164 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:45:06.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:06 vm06 bash[28114]: audit 2026-04-15T13:45:05.870778+0000 mon.vm06 (mon.0) 1165 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:45:06.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:06 vm06 bash[28114]: audit 2026-04-15T13:45:05.872118+0000 mon.vm06 (mon.0) 1166 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:45:06.861 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:45:06.861 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (8m) 3m ago 9m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:45:06.862 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (3m) 3m ago 9m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:45:06.862 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 3m ago 9m - -
2026-04-15T13:45:06.862 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (9m) 3m ago 9m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:45:07.082 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:45:07.082 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:45:07.082 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:45:07.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:07 vm09 bash[34466]: cluster 2026-04-15T13:45:05.866745+0000 mgr.vm06.qbbldl (mgr.14229) 661 : cluster [DBG] pgmap v363: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:45:07.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:07 vm09 bash[34466]: cluster 2026-04-15T13:45:05.867053+0000 mgr.vm06.qbbldl (mgr.14229) 662 : cluster [DBG] pgmap v364: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:45:07.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:07 vm09 bash[34466]: audit 2026-04-15T13:45:07.078505+0000 mon.vm06 (mon.0) 1167 : audit [DBG] from='client.? 192.168.123.106:0/1246741191' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:45:07.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:07 vm06 bash[28114]: cluster 2026-04-15T13:45:05.866745+0000 mgr.vm06.qbbldl (mgr.14229) 661 : cluster [DBG] pgmap v363: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:45:07.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:07 vm06 bash[28114]: cluster 2026-04-15T13:45:05.867053+0000 mgr.vm06.qbbldl (mgr.14229) 662 : cluster [DBG] pgmap v364: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:45:07.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:07 vm06 bash[28114]: audit 2026-04-15T13:45:07.078505+0000 mon.vm06 (mon.0) 1167 : audit [DBG] from='client.? 192.168.123.106:0/1246741191' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:45:08.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:08 vm09 bash[34466]: audit 2026-04-15T13:45:06.672441+0000 mgr.vm06.qbbldl (mgr.14229) 663 : audit [DBG] from='client.15986 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:08.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:08 vm09 bash[34466]: audit 2026-04-15T13:45:06.855600+0000 mgr.vm06.qbbldl (mgr.14229) 664 : audit [DBG] from='client.15990 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:08.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:08 vm06 bash[28114]: audit 2026-04-15T13:45:06.672441+0000 mgr.vm06.qbbldl (mgr.14229) 663 : audit [DBG] from='client.15986 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:08.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:08 vm06 bash[28114]: audit 2026-04-15T13:45:06.855600+0000 mgr.vm06.qbbldl (mgr.14229) 664 : audit [DBG] from='client.15990 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:08.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:08 vm06 bash[28114]: audit 2026-04-15T13:45:06.855600+0000 mgr.vm06.qbbldl (mgr.14229) 664 : audit [DBG] from='client.15990 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:09.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:09 vm09 bash[34466]: cluster 2026-04-15T13:45:07.867404+0000 mgr.vm06.qbbldl (mgr.14229) 665 : cluster [DBG] pgmap v365: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:45:09.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:09 vm09 bash[34466]: cluster 2026-04-15T13:45:07.867404+0000 mgr.vm06.qbbldl (mgr.14229) 665 : cluster [DBG] pgmap v365: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:45:09.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:09 vm09 bash[34466]: audit 2026-04-15T13:45:08.490725+0000 mon.vm06 (mon.0) 1168 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:45:09.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:09 vm09 bash[34466]: audit 2026-04-15T13:45:08.490725+0000 mon.vm06 (mon.0) 1168 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:45:09.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:09 vm06 bash[28114]: cluster 2026-04-15T13:45:07.867404+0000 mgr.vm06.qbbldl (mgr.14229) 665 : cluster [DBG] pgmap v365: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:45:09.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:09 vm06 bash[28114]: cluster 2026-04-15T13:45:07.867404+0000 mgr.vm06.qbbldl (mgr.14229) 665 : cluster [DBG] pgmap v365: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:45:09.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:09 vm06 bash[28114]: audit 2026-04-15T13:45:08.490725+0000 mon.vm06 (mon.0) 1168 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:45:09.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:09 vm06 bash[28114]: audit 2026-04-15T13:45:08.490725+0000 mon.vm06 (mon.0) 1168 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:45:11.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:11 vm09 bash[34466]: cluster 2026-04-15T13:45:09.867901+0000 mgr.vm06.qbbldl (mgr.14229) 666 : cluster [DBG] pgmap v366: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 472 B/s wr, 0 op/s 2026-04-15T13:45:11.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:11 vm09 bash[34466]: cluster 2026-04-15T13:45:09.867901+0000 mgr.vm06.qbbldl (mgr.14229) 666 : cluster [DBG] pgmap 
v366: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 472 B/s wr, 0 op/s 2026-04-15T13:45:11.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:11 vm06 bash[28114]: cluster 2026-04-15T13:45:09.867901+0000 mgr.vm06.qbbldl (mgr.14229) 666 : cluster [DBG] pgmap v366: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 472 B/s wr, 0 op/s 2026-04-15T13:45:11.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:11 vm06 bash[28114]: cluster 2026-04-15T13:45:09.867901+0000 mgr.vm06.qbbldl (mgr.14229) 666 : cluster [DBG] pgmap v366: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 472 B/s wr, 0 op/s 2026-04-15T13:45:12.285 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop 2026-04-15T13:45:12.479 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:45:12.479 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (8m) 3m ago 9m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:45:12.479 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (3m) 3m ago 9m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:45:12.479 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 3m ago 9m - - 2026-04-15T13:45:12.479 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (9m) 3m ago 9m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:45:12.720 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:45:12.721 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:45:12.721 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state 2026-04-15T13:45:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:13 vm09 bash[34466]: cluster 2026-04-15T13:45:11.868340+0000 mgr.vm06.qbbldl (mgr.14229) 667 : cluster [DBG] pgmap v367: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 472 B/s wr, 0 op/s 2026-04-15T13:45:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:13 vm09 bash[34466]: cluster 2026-04-15T13:45:11.868340+0000 mgr.vm06.qbbldl (mgr.14229) 667 : cluster [DBG] pgmap v367: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 472 B/s wr, 0 op/s 2026-04-15T13:45:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:13 vm09 bash[34466]: audit 2026-04-15T13:45:12.266121+0000 mgr.vm06.qbbldl (mgr.14229) 668 : audit [DBG] from='client.15998 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:13 vm09 bash[34466]: audit 2026-04-15T13:45:12.266121+0000 mgr.vm06.qbbldl (mgr.14229) 668 : audit [DBG] from='client.15998 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:13 vm09 bash[34466]: audit 2026-04-15T13:45:12.716953+0000 mon.vm06 (mon.0) 1169 : audit [DBG] from='client.? 
192.168.123.106:0/1265608768' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:45:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:13 vm09 bash[34466]: audit 2026-04-15T13:45:12.716953+0000 mon.vm06 (mon.0) 1169 : audit [DBG] from='client.? 192.168.123.106:0/1265608768' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:45:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:13 vm06 bash[28114]: cluster 2026-04-15T13:45:11.868340+0000 mgr.vm06.qbbldl (mgr.14229) 667 : cluster [DBG] pgmap v367: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 472 B/s wr, 0 op/s 2026-04-15T13:45:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:13 vm06 bash[28114]: cluster 2026-04-15T13:45:11.868340+0000 mgr.vm06.qbbldl (mgr.14229) 667 : cluster [DBG] pgmap v367: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 472 B/s wr, 0 op/s 2026-04-15T13:45:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:13 vm06 bash[28114]: audit 2026-04-15T13:45:12.266121+0000 mgr.vm06.qbbldl (mgr.14229) 668 : audit [DBG] from='client.15998 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:13 vm06 bash[28114]: audit 2026-04-15T13:45:12.266121+0000 mgr.vm06.qbbldl (mgr.14229) 668 : audit [DBG] from='client.15998 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:13 vm06 bash[28114]: audit 2026-04-15T13:45:12.716953+0000 mon.vm06 (mon.0) 1169 : audit [DBG] from='client.? 192.168.123.106:0/1265608768' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:45:13.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:13 vm06 bash[28114]: audit 2026-04-15T13:45:12.716953+0000 mon.vm06 (mon.0) 1169 : audit [DBG] from='client.? 
192.168.123.106:0/1265608768' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:45:14.608 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:14 vm09 bash[34466]: audit 2026-04-15T13:45:12.472096+0000 mgr.vm06.qbbldl (mgr.14229) 669 : audit [DBG] from='client.16002 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:14.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:14 vm09 bash[34466]: audit 2026-04-15T13:45:12.472096+0000 mgr.vm06.qbbldl (mgr.14229) 669 : audit [DBG] from='client.16002 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:14.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:14 vm06 bash[28114]: audit 2026-04-15T13:45:12.472096+0000 mgr.vm06.qbbldl (mgr.14229) 669 : audit [DBG] from='client.16002 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:14.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:14 vm06 bash[28114]: audit 2026-04-15T13:45:12.472096+0000 mgr.vm06.qbbldl (mgr.14229) 669 : audit [DBG] from='client.16002 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:15.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:15 vm09 bash[34466]: cluster 2026-04-15T13:45:13.868776+0000 mgr.vm06.qbbldl (mgr.14229) 670 : cluster [DBG] pgmap v368: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 472 B/s wr, 0 op/s 2026-04-15T13:45:15.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:15 vm09 bash[34466]: cluster 2026-04-15T13:45:13.868776+0000 mgr.vm06.qbbldl (mgr.14229) 670 : cluster [DBG] pgmap v368: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 472 B/s wr, 0 op/s 2026-04-15T13:45:15.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:15 vm06 bash[28114]: cluster 2026-04-15T13:45:13.868776+0000 mgr.vm06.qbbldl (mgr.14229) 670 : cluster [DBG] pgmap v368: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 472 B/s wr, 0 op/s 2026-04-15T13:45:15.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:15 vm06 bash[28114]: cluster 2026-04-15T13:45:13.868776+0000 mgr.vm06.qbbldl (mgr.14229) 670 : cluster [DBG] pgmap v368: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 236 B/s rd, 472 B/s wr, 0 op/s 2026-04-15T13:45:17.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:17 vm09 bash[34466]: cluster 2026-04-15T13:45:15.869188+0000 mgr.vm06.qbbldl (mgr.14229) 671 : cluster [DBG] pgmap v369: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 2026-04-15T13:45:17.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:17 vm09 bash[34466]: cluster 2026-04-15T13:45:15.869188+0000 mgr.vm06.qbbldl (mgr.14229) 671 : cluster [DBG] pgmap v369: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 2026-04-15T13:45:17.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:17 vm06 bash[28114]: cluster 2026-04-15T13:45:15.869188+0000 mgr.vm06.qbbldl (mgr.14229) 671 : cluster [DBG] pgmap v369: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s 
2026-04-15T13:45:17.924 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:45:18.114 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:45:18.114 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (8m) 3m ago 9m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:45:18.114 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (3m) 3m ago 9m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:45:18.114 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 3m ago 9m - -
2026-04-15T13:45:18.114 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (9m) 3m ago 9m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:45:18.339 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:45:18.339 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:45:18.339 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:45:19.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:19 vm09 bash[34466]: cluster 2026-04-15T13:45:17.869548+0000 mgr.vm06.qbbldl (mgr.14229) 672 : cluster [DBG] pgmap v370: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:45:19.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:19 vm09 bash[34466]: audit 2026-04-15T13:45:17.903357+0000 mgr.vm06.qbbldl (mgr.14229) 673 : audit [DBG] from='client.16010 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:19.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:19 vm09 bash[34466]: audit 2026-04-15T13:45:18.107367+0000 mgr.vm06.qbbldl (mgr.14229) 674 : audit [DBG] from='client.25323 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:19.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:19 vm09 bash[34466]: audit 2026-04-15T13:45:18.335821+0000 mon.vm06 (mon.0) 1170 : audit [DBG] from='client.? 192.168.123.106:0/4026744775' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:45:19.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:19 vm06 bash[28114]: cluster 2026-04-15T13:45:17.869548+0000 mgr.vm06.qbbldl (mgr.14229) 672 : cluster [DBG] pgmap v370: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:45:19.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:19 vm06 bash[28114]: audit 2026-04-15T13:45:17.903357+0000 mgr.vm06.qbbldl (mgr.14229) 673 : audit [DBG] from='client.16010 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:19.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:19 vm06 bash[28114]: audit 2026-04-15T13:45:18.107367+0000 mgr.vm06.qbbldl (mgr.14229) 674 : audit [DBG] from='client.25323 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:19.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:19 vm06 bash[28114]: audit 2026-04-15T13:45:18.335821+0000 mon.vm06 (mon.0) 1170 : audit [DBG] from='client.? 192.168.123.106:0/4026744775' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:45:21.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:21 vm09 bash[34466]: cluster 2026-04-15T13:45:19.869908+0000 mgr.vm06.qbbldl (mgr.14229) 675 : cluster [DBG] pgmap v371: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:45:21.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:21 vm06 bash[28114]: cluster 2026-04-15T13:45:19.869908+0000 mgr.vm06.qbbldl (mgr.14229) 675 : cluster [DBG] pgmap v371: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:45:23.543 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:45:23.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:23 vm09 bash[34466]: cluster 2026-04-15T13:45:21.870287+0000 mgr.vm06.qbbldl (mgr.14229) 676 : cluster [DBG] pgmap v372: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:45:23.719 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:45:23.719 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (9m) 3m ago 9m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:45:23.719 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (3m) 3m ago 9m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:45:23.719 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 3m ago 9m - -
2026-04-15T13:45:23.719 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (9m) 3m ago 9m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:45:23.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:23 vm06 bash[28114]: cluster 2026-04-15T13:45:21.870287+0000 mgr.vm06.qbbldl (mgr.14229) 676 : cluster [DBG] pgmap v372: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:45:23.940 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:45:23.940 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:45:23.940 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:45:24.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:24 vm09 bash[34466]: audit 2026-04-15T13:45:23.490967+0000 mon.vm06 (mon.0) 1171 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:45:24.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:24 vm09 bash[34466]: audit 2026-04-15T13:45:23.936504+0000 mon.vm06 (mon.0) 1172 : audit [DBG] from='client.? 192.168.123.106:0/3526620980' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:45:24.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:24 vm06 bash[28114]: audit 2026-04-15T13:45:23.490967+0000 mon.vm06 (mon.0) 1171 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:45:24.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:24 vm06 bash[28114]: audit 2026-04-15T13:45:23.936504+0000 mon.vm06 (mon.0) 1172 : audit [DBG] from='client.? 192.168.123.106:0/3526620980' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:45:25.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:25 vm06 bash[28114]: audit 2026-04-15T13:45:23.521446+0000 mgr.vm06.qbbldl (mgr.14229) 677 : audit [DBG] from='client.16022 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:25.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:25 vm06 bash[28114]: audit 2026-04-15T13:45:23.712254+0000 mgr.vm06.qbbldl (mgr.14229) 678 : audit [DBG] from='client.16026 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:25.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:25 vm06 bash[28114]: cluster 2026-04-15T13:45:23.870679+0000 mgr.vm06.qbbldl (mgr.14229) 679 : cluster [DBG] pgmap v373: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:45:25.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:25 vm09 bash[34466]: audit 2026-04-15T13:45:23.521446+0000 mgr.vm06.qbbldl (mgr.14229) 677 : audit [DBG] from='client.16022 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:25.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:25 vm09 bash[34466]: audit 2026-04-15T13:45:23.712254+0000 mgr.vm06.qbbldl (mgr.14229) 678 : audit [DBG] from='client.16026 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:25.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:25 vm09 bash[34466]: cluster 2026-04-15T13:45:23.870679+0000 mgr.vm06.qbbldl (mgr.14229) 679 : cluster [DBG] pgmap v373: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:45:27.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:27 vm06 bash[28114]: cluster 2026-04-15T13:45:25.871157+0000 mgr.vm06.qbbldl (mgr.14229) 680 : cluster [DBG] pgmap v374: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:45:27.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:27 vm09 bash[34466]: cluster 2026-04-15T13:45:25.871157+0000 mgr.vm06.qbbldl (mgr.14229) 680 : cluster [DBG] pgmap v374: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:45:29.143 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:45:29.321 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:45:29.321 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (9m) 3m ago 10m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:45:29.321 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (3m) 3m ago 10m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:45:29.321 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 3m ago 10m - -
2026-04-15T13:45:29.321 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (10m) 3m ago 10m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:45:29.551 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:45:29.551 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:45:29.551 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:45:29.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:29 vm06 bash[28114]: cluster 2026-04-15T13:45:27.871590+0000 mgr.vm06.qbbldl (mgr.14229) 681 : cluster [DBG] pgmap v375: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:45:29.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:29 vm09 bash[34466]: cluster 2026-04-15T13:45:27.871590+0000 mgr.vm06.qbbldl (mgr.14229) 681 : cluster [DBG] pgmap v375: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:45:30.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:30 vm06 bash[28114]: audit 2026-04-15T13:45:29.123255+0000 mgr.vm06.qbbldl (mgr.14229) 682 : audit [DBG] from='client.16034 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:30.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:30 vm06 bash[28114]: audit 2026-04-15T13:45:29.314426+0000 mgr.vm06.qbbldl (mgr.14229) 683 : audit [DBG] from='client.16038 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:30.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:30 vm06 bash[28114]: audit 2026-04-15T13:45:29.547155+0000 mon.vm06 (mon.0) 1173 : audit [DBG] from='client.? 192.168.123.106:0/2944473976' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:45:30.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:30 vm09 bash[34466]: audit 2026-04-15T13:45:29.123255+0000 mgr.vm06.qbbldl (mgr.14229) 682 : audit [DBG] from='client.16034 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:30.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:30 vm09 bash[34466]: audit 2026-04-15T13:45:29.314426+0000 mgr.vm06.qbbldl (mgr.14229) 683 : audit [DBG] from='client.16038 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:30.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:30 vm09 bash[34466]: audit 2026-04-15T13:45:29.547155+0000 mon.vm06 (mon.0) 1173 : audit [DBG] from='client.? 192.168.123.106:0/2944473976' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:45:31.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:31 vm06 bash[28114]: cluster 2026-04-15T13:45:29.872022+0000 mgr.vm06.qbbldl (mgr.14229) 684 : cluster [DBG] pgmap v376: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:45:31.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:31 vm09 bash[34466]: cluster 2026-04-15T13:45:29.872022+0000 mgr.vm06.qbbldl (mgr.14229) 684 : cluster [DBG] pgmap v376: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:45:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:33 vm06 bash[28114]: cluster 2026-04-15T13:45:31.872496+0000 mgr.vm06.qbbldl (mgr.14229) 685 : cluster [DBG] pgmap v377: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:45:33.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:33 vm09 bash[34466]: cluster 2026-04-15T13:45:31.872496+0000 mgr.vm06.qbbldl (mgr.14229) 685 : cluster [DBG] pgmap v377: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:45:34.766 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:45:34.956 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:45:34.956 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (9m) 3m ago 10m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:45:34.956 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (3m) 3m ago 10m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:45:34.956 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 3m ago 10m - -
2026-04-15T13:45:34.956 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (10m) 3m ago 10m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:45:35.194 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:45:35.194 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:45:35.194 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:45:35.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:35 vm06 bash[28114]: cluster 2026-04-15T13:45:33.872915+0000 mgr.vm06.qbbldl (mgr.14229) 686 : cluster [DBG] pgmap v378: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:45:35.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:35 vm06 bash[28114]: audit 2026-04-15T13:45:35.190219+0000 mon.vm06 (mon.0) 1174 : audit [DBG] from='client.? 192.168.123.106:0/1634758607' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:45:35.858 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:35 vm09 bash[34466]: cluster 2026-04-15T13:45:33.872915+0000 mgr.vm06.qbbldl (mgr.14229) 686 : cluster [DBG] pgmap v378: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:45:35.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:35 vm09 bash[34466]: audit 2026-04-15T13:45:35.190219+0000 mon.vm06 (mon.0) 1174 : audit [DBG] from='client.? 192.168.123.106:0/1634758607' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:45:36.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:36 vm06 bash[28114]: audit 2026-04-15T13:45:34.745649+0000 mgr.vm06.qbbldl (mgr.14229) 687 : audit [DBG] from='client.16046 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:36.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:36 vm06 bash[28114]: audit 2026-04-15T13:45:34.949445+0000 mgr.vm06.qbbldl (mgr.14229) 688 : audit [DBG] from='client.16050 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:36.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:36 vm09 bash[34466]: audit 2026-04-15T13:45:34.745649+0000 mgr.vm06.qbbldl (mgr.14229) 687 : audit [DBG] from='client.16046 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:36.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:36 vm09 bash[34466]: audit 2026-04-15T13:45:34.949445+0000 mgr.vm06.qbbldl (mgr.14229) 688 : audit [DBG] from='client.16050 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:45:37.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:37 vm06 bash[28114]: cluster 2026-04-15T13:45:35.873377+0000 mgr.vm06.qbbldl (mgr.14229) 689 : cluster [DBG] pgmap v379: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:45:37.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:37 vm09 bash[34466]: cluster 2026-04-15T13:45:35.873377+0000 mgr.vm06.qbbldl (mgr.14229) 689 : cluster [DBG] pgmap v379: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:45:37.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:37
vm09 bash[34466]: cluster 2026-04-15T13:45:35.873377+0000 mgr.vm06.qbbldl (mgr.14229) 689 : cluster [DBG] pgmap v379: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:45:38.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:38 vm06 bash[28114]: cluster 2026-04-15T13:45:37.873794+0000 mgr.vm06.qbbldl (mgr.14229) 690 : cluster [DBG] pgmap v380: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 6.0 KiB/s rd, 0 B/s wr, 9 op/s 2026-04-15T13:45:38.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:38 vm06 bash[28114]: cluster 2026-04-15T13:45:37.873794+0000 mgr.vm06.qbbldl (mgr.14229) 690 : cluster [DBG] pgmap v380: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 6.0 KiB/s rd, 0 B/s wr, 9 op/s 2026-04-15T13:45:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:38 vm09 bash[34466]: cluster 2026-04-15T13:45:37.873794+0000 mgr.vm06.qbbldl (mgr.14229) 690 : cluster [DBG] pgmap v380: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 6.0 KiB/s rd, 0 B/s wr, 9 op/s 2026-04-15T13:45:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:38 vm09 bash[34466]: cluster 2026-04-15T13:45:37.873794+0000 mgr.vm06.qbbldl (mgr.14229) 690 : cluster [DBG] pgmap v380: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 6.0 KiB/s rd, 0 B/s wr, 9 op/s 2026-04-15T13:45:39.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:39 vm06 bash[28114]: audit 2026-04-15T13:45:38.491000+0000 mon.vm06 (mon.0) 1175 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:45:39.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:39 vm06 bash[28114]: audit 2026-04-15T13:45:38.491000+0000 mon.vm06 (mon.0) 1175 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:45:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:39 vm09 bash[34466]: audit 2026-04-15T13:45:38.491000+0000 mon.vm06 (mon.0) 1175 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:45:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:39 vm09 bash[34466]: audit 2026-04-15T13:45:38.491000+0000 mon.vm06 (mon.0) 1175 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:45:40.402 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop 2026-04-15T13:45:40.592 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:45:40.592 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (9m) 3m ago 10m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:45:40.592 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (4m) 3m ago 10m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:45:40.592 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 3m ago 10m - - 2026-04-15T13:45:40.592 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (10m) 3m 
ago 10m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:45:40.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:40 vm06 bash[28114]: cluster 2026-04-15T13:45:39.874258+0000 mgr.vm06.qbbldl (mgr.14229) 691 : cluster [DBG] pgmap v381: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 341 B/s wr, 37 op/s 2026-04-15T13:45:40.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:40 vm06 bash[28114]: cluster 2026-04-15T13:45:39.874258+0000 mgr.vm06.qbbldl (mgr.14229) 691 : cluster [DBG] pgmap v381: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 341 B/s wr, 37 op/s 2026-04-15T13:45:40.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:40 vm06 bash[28114]: audit 2026-04-15T13:45:40.381706+0000 mgr.vm06.qbbldl (mgr.14229) 692 : audit [DBG] from='client.16058 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:40.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:40 vm06 bash[28114]: audit 2026-04-15T13:45:40.381706+0000 mgr.vm06.qbbldl (mgr.14229) 692 : audit [DBG] from='client.16058 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:40.815 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:45:40.815 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:45:40.815 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state 2026-04-15T13:45:40.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:40 vm09 bash[34466]: cluster 2026-04-15T13:45:39.874258+0000 mgr.vm06.qbbldl (mgr.14229) 691 : cluster [DBG] pgmap v381: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 341 B/s wr, 37 op/s 2026-04-15T13:45:40.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:40 vm09 bash[34466]: cluster 2026-04-15T13:45:39.874258+0000 mgr.vm06.qbbldl (mgr.14229) 691 : cluster [DBG] pgmap v381: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 341 B/s wr, 37 op/s 2026-04-15T13:45:40.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:40 vm09 bash[34466]: audit 2026-04-15T13:45:40.381706+0000 mgr.vm06.qbbldl (mgr.14229) 692 : audit [DBG] from='client.16058 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:40.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:40 vm09 bash[34466]: audit 2026-04-15T13:45:40.381706+0000 mgr.vm06.qbbldl (mgr.14229) 692 : audit [DBG] from='client.16058 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:41.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:41 vm06 bash[28114]: audit 2026-04-15T13:45:40.584689+0000 mgr.vm06.qbbldl (mgr.14229) 693 : audit [DBG] from='client.16062 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:41.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:41 vm06 bash[28114]: audit 2026-04-15T13:45:40.584689+0000 mgr.vm06.qbbldl (mgr.14229) 693 : audit [DBG] from='client.16062 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:41.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:41 vm06 
bash[28114]: audit 2026-04-15T13:45:40.811347+0000 mon.vm06 (mon.0) 1176 : audit [DBG] from='client.? 192.168.123.106:0/383245675' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:45:41.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:41 vm06 bash[28114]: audit 2026-04-15T13:45:40.811347+0000 mon.vm06 (mon.0) 1176 : audit [DBG] from='client.? 192.168.123.106:0/383245675' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:45:41.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:41 vm09 bash[34466]: audit 2026-04-15T13:45:40.584689+0000 mgr.vm06.qbbldl (mgr.14229) 693 : audit [DBG] from='client.16062 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:41.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:41 vm09 bash[34466]: audit 2026-04-15T13:45:40.584689+0000 mgr.vm06.qbbldl (mgr.14229) 693 : audit [DBG] from='client.16062 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:41.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:41 vm09 bash[34466]: audit 2026-04-15T13:45:40.811347+0000 mon.vm06 (mon.0) 1176 : audit [DBG] from='client.? 192.168.123.106:0/383245675' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:45:41.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:41 vm09 bash[34466]: audit 2026-04-15T13:45:40.811347+0000 mon.vm06 (mon.0) 1176 : audit [DBG] from='client.? 192.168.123.106:0/383245675' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:45:42.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:42 vm06 bash[28114]: cluster 2026-04-15T13:45:41.874759+0000 mgr.vm06.qbbldl (mgr.14229) 694 : cluster [DBG] pgmap v382: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T13:45:42.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:42 vm06 bash[28114]: cluster 2026-04-15T13:45:41.874759+0000 mgr.vm06.qbbldl (mgr.14229) 694 : cluster [DBG] pgmap v382: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T13:45:42.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:42 vm09 bash[34466]: cluster 2026-04-15T13:45:41.874759+0000 mgr.vm06.qbbldl (mgr.14229) 694 : cluster [DBG] pgmap v382: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T13:45:42.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:42 vm09 bash[34466]: cluster 2026-04-15T13:45:41.874759+0000 mgr.vm06.qbbldl (mgr.14229) 694 : cluster [DBG] pgmap v382: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T13:45:45.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:44 vm06 bash[28114]: cluster 2026-04-15T13:45:43.875280+0000 mgr.vm06.qbbldl (mgr.14229) 695 : cluster [DBG] pgmap v383: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T13:45:45.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:44 vm06 bash[28114]: cluster 2026-04-15T13:45:43.875280+0000 mgr.vm06.qbbldl (mgr.14229) 695 : cluster [DBG] pgmap v383: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB 
/ 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T13:45:45.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:44 vm09 bash[34466]: cluster 2026-04-15T13:45:43.875280+0000 mgr.vm06.qbbldl (mgr.14229) 695 : cluster [DBG] pgmap v383: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T13:45:45.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:44 vm09 bash[34466]: cluster 2026-04-15T13:45:43.875280+0000 mgr.vm06.qbbldl (mgr.14229) 695 : cluster [DBG] pgmap v383: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T13:45:46.023 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop 2026-04-15T13:45:46.209 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:45:46.209 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (9m) 4m ago 10m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:45:46.209 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (4m) 4m ago 10m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:45:46.209 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 3m ago 10m - - 2026-04-15T13:45:46.209 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (10m) 3m ago 10m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:45:46.438 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:45:46.438 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:45:46.438 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state 2026-04-15T13:45:47.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:46 vm06 bash[28114]: cluster 2026-04-15T13:45:45.875755+0000 mgr.vm06.qbbldl (mgr.14229) 696 : cluster [DBG] pgmap v384: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T13:45:47.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:46 vm06 bash[28114]: cluster 2026-04-15T13:45:45.875755+0000 mgr.vm06.qbbldl (mgr.14229) 696 : cluster [DBG] pgmap v384: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T13:45:47.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:46 vm06 bash[28114]: audit 2026-04-15T13:45:46.003558+0000 mgr.vm06.qbbldl (mgr.14229) 697 : audit [DBG] from='client.25365 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:47.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:46 vm06 bash[28114]: audit 2026-04-15T13:45:46.003558+0000 mgr.vm06.qbbldl (mgr.14229) 697 : audit [DBG] from='client.25365 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:47.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:46 vm06 bash[28114]: audit 2026-04-15T13:45:46.202482+0000 mgr.vm06.qbbldl (mgr.14229) 698 : audit [DBG] from='client.16074 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:47.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:46 vm06 
bash[28114]: audit 2026-04-15T13:45:46.202482+0000 mgr.vm06.qbbldl (mgr.14229) 698 : audit [DBG] from='client.16074 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:47.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:46 vm06 bash[28114]: audit 2026-04-15T13:45:46.434277+0000 mon.vm06 (mon.0) 1177 : audit [DBG] from='client.? 192.168.123.106:0/814346320' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:45:47.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:46 vm06 bash[28114]: audit 2026-04-15T13:45:46.434277+0000 mon.vm06 (mon.0) 1177 : audit [DBG] from='client.? 192.168.123.106:0/814346320' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:45:47.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:46 vm09 bash[34466]: cluster 2026-04-15T13:45:45.875755+0000 mgr.vm06.qbbldl (mgr.14229) 696 : cluster [DBG] pgmap v384: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T13:45:47.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:46 vm09 bash[34466]: cluster 2026-04-15T13:45:45.875755+0000 mgr.vm06.qbbldl (mgr.14229) 696 : cluster [DBG] pgmap v384: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T13:45:47.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:46 vm09 bash[34466]: audit 2026-04-15T13:45:46.003558+0000 mgr.vm06.qbbldl (mgr.14229) 697 : audit [DBG] from='client.25365 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:47.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:46 vm09 bash[34466]: audit 2026-04-15T13:45:46.003558+0000 mgr.vm06.qbbldl (mgr.14229) 697 : audit [DBG] from='client.25365 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:47.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:46 vm09 bash[34466]: audit 2026-04-15T13:45:46.202482+0000 mgr.vm06.qbbldl (mgr.14229) 698 : audit [DBG] from='client.16074 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:47.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:46 vm09 bash[34466]: audit 2026-04-15T13:45:46.202482+0000 mgr.vm06.qbbldl (mgr.14229) 698 : audit [DBG] from='client.16074 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:47.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:46 vm09 bash[34466]: audit 2026-04-15T13:45:46.434277+0000 mon.vm06 (mon.0) 1177 : audit [DBG] from='client.? 192.168.123.106:0/814346320' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:45:47.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:46 vm09 bash[34466]: audit 2026-04-15T13:45:46.434277+0000 mon.vm06 (mon.0) 1177 : audit [DBG] from='client.? 
192.168.123.106:0/814346320' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:45:49.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:48 vm06 bash[28114]: cluster 2026-04-15T13:45:47.876166+0000 mgr.vm06.qbbldl (mgr.14229) 699 : cluster [DBG] pgmap v385: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T13:45:49.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:48 vm06 bash[28114]: cluster 2026-04-15T13:45:47.876166+0000 mgr.vm06.qbbldl (mgr.14229) 699 : cluster [DBG] pgmap v385: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T13:45:49.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:48 vm09 bash[34466]: cluster 2026-04-15T13:45:47.876166+0000 mgr.vm06.qbbldl (mgr.14229) 699 : cluster [DBG] pgmap v385: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T13:45:49.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:48 vm09 bash[34466]: cluster 2026-04-15T13:45:47.876166+0000 mgr.vm06.qbbldl (mgr.14229) 699 : cluster [DBG] pgmap v385: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 341 B/s wr, 60 op/s 2026-04-15T13:45:51.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:50 vm06 bash[28114]: cluster 2026-04-15T13:45:49.876583+0000 mgr.vm06.qbbldl (mgr.14229) 700 : cluster [DBG] pgmap v386: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 341 B/s wr, 50 op/s 2026-04-15T13:45:51.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:50 vm06 bash[28114]: cluster 2026-04-15T13:45:49.876583+0000 mgr.vm06.qbbldl (mgr.14229) 700 : cluster [DBG] pgmap v386: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 341 B/s wr, 50 op/s 2026-04-15T13:45:51.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:50 vm09 bash[34466]: cluster 2026-04-15T13:45:49.876583+0000 mgr.vm06.qbbldl (mgr.14229) 700 : cluster [DBG] pgmap v386: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 341 B/s wr, 50 op/s 2026-04-15T13:45:51.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:50 vm09 bash[34466]: cluster 2026-04-15T13:45:49.876583+0000 mgr.vm06.qbbldl (mgr.14229) 700 : cluster [DBG] pgmap v386: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 30 KiB/s rd, 341 B/s wr, 50 op/s 2026-04-15T13:45:51.638 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop 2026-04-15T13:45:51.821 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:45:51.821 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (9m) 4m ago 10m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:45:51.821 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (4m) 4m ago 10m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:45:51.821 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 3m ago 10m - - 2026-04-15T13:45:51.821 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (10m) 3m ago 10m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:45:52.061 
INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:45:52.061 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:45:52.061 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state 2026-04-15T13:45:53.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:52 vm06 bash[28114]: audit 2026-04-15T13:45:51.618205+0000 mgr.vm06.qbbldl (mgr.14229) 701 : audit [DBG] from='client.16082 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:53.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:52 vm06 bash[28114]: audit 2026-04-15T13:45:51.618205+0000 mgr.vm06.qbbldl (mgr.14229) 701 : audit [DBG] from='client.16082 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:53.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:52 vm06 bash[28114]: audit 2026-04-15T13:45:51.814944+0000 mgr.vm06.qbbldl (mgr.14229) 702 : audit [DBG] from='client.16086 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:53.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:52 vm06 bash[28114]: audit 2026-04-15T13:45:51.814944+0000 mgr.vm06.qbbldl (mgr.14229) 702 : audit [DBG] from='client.16086 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:53.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:52 vm06 bash[28114]: cluster 2026-04-15T13:45:51.877016+0000 mgr.vm06.qbbldl (mgr.14229) 703 : cluster [DBG] pgmap v387: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 0 B/s wr, 22 op/s 2026-04-15T13:45:53.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:52 vm06 bash[28114]: cluster 2026-04-15T13:45:51.877016+0000 mgr.vm06.qbbldl (mgr.14229) 703 : cluster [DBG] pgmap v387: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 0 B/s wr, 22 op/s 2026-04-15T13:45:53.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:52 vm06 bash[28114]: audit 2026-04-15T13:45:52.060460+0000 mon.vm09 (mon.1) 35 : audit [DBG] from='client.? 192.168.123.106:0/1272022260' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:45:53.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:52 vm06 bash[28114]: audit 2026-04-15T13:45:52.060460+0000 mon.vm09 (mon.1) 35 : audit [DBG] from='client.? 
192.168.123.106:0/1272022260' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:45:53.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:52 vm09 bash[34466]: audit 2026-04-15T13:45:51.618205+0000 mgr.vm06.qbbldl (mgr.14229) 701 : audit [DBG] from='client.16082 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:53.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:52 vm09 bash[34466]: audit 2026-04-15T13:45:51.618205+0000 mgr.vm06.qbbldl (mgr.14229) 701 : audit [DBG] from='client.16082 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:53.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:52 vm09 bash[34466]: audit 2026-04-15T13:45:51.814944+0000 mgr.vm06.qbbldl (mgr.14229) 702 : audit [DBG] from='client.16086 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:53.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:52 vm09 bash[34466]: audit 2026-04-15T13:45:51.814944+0000 mgr.vm06.qbbldl (mgr.14229) 702 : audit [DBG] from='client.16086 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:53.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:52 vm09 bash[34466]: cluster 2026-04-15T13:45:51.877016+0000 mgr.vm06.qbbldl (mgr.14229) 703 : cluster [DBG] pgmap v387: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 0 B/s wr, 22 op/s 2026-04-15T13:45:53.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:52 vm09 bash[34466]: cluster 2026-04-15T13:45:51.877016+0000 mgr.vm06.qbbldl (mgr.14229) 703 : cluster [DBG] pgmap v387: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 13 KiB/s rd, 0 B/s wr, 22 op/s 2026-04-15T13:45:53.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:52 vm09 bash[34466]: audit 2026-04-15T13:45:52.060460+0000 mon.vm09 (mon.1) 35 : audit [DBG] from='client.? 192.168.123.106:0/1272022260' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:45:53.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:52 vm09 bash[34466]: audit 2026-04-15T13:45:52.060460+0000 mon.vm09 (mon.1) 35 : audit [DBG] from='client.? 
192.168.123.106:0/1272022260' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:45:54.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:53 vm06 bash[28114]: audit 2026-04-15T13:45:53.491434+0000 mon.vm06 (mon.0) 1178 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:45:54.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:53 vm06 bash[28114]: audit 2026-04-15T13:45:53.491434+0000 mon.vm06 (mon.0) 1178 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:45:54.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:53 vm09 bash[34466]: audit 2026-04-15T13:45:53.491434+0000 mon.vm06 (mon.0) 1178 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:45:54.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:53 vm09 bash[34466]: audit 2026-04-15T13:45:53.491434+0000 mon.vm06 (mon.0) 1178 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:45:55.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:54 vm06 bash[28114]: cluster 2026-04-15T13:45:53.877454+0000 mgr.vm06.qbbldl (mgr.14229) 704 : cluster [DBG] pgmap v388: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:45:55.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:54 vm06 bash[28114]: cluster 2026-04-15T13:45:53.877454+0000 mgr.vm06.qbbldl (mgr.14229) 704 : cluster [DBG] pgmap v388: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:45:55.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:55 vm09 bash[34466]: cluster 2026-04-15T13:45:53.877454+0000 mgr.vm06.qbbldl (mgr.14229) 704 : cluster [DBG] pgmap v388: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:45:55.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:55 vm09 bash[34466]: cluster 2026-04-15T13:45:53.877454+0000 mgr.vm06.qbbldl (mgr.14229) 704 : cluster [DBG] pgmap v388: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:45:57.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:57 vm06 bash[28114]: cluster 2026-04-15T13:45:55.878196+0000 mgr.vm06.qbbldl (mgr.14229) 705 : cluster [DBG] pgmap v389: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:45:57.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:57 vm06 bash[28114]: cluster 2026-04-15T13:45:55.878196+0000 mgr.vm06.qbbldl (mgr.14229) 705 : cluster [DBG] pgmap v389: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:45:57.268 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop 2026-04-15T13:45:57.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:57 vm09 bash[34466]: cluster 2026-04-15T13:45:55.878196+0000 mgr.vm06.qbbldl (mgr.14229) 705 : cluster [DBG] pgmap v389: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:45:57.359 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:57 vm09 bash[34466]: cluster 2026-04-15T13:45:55.878196+0000 mgr.vm06.qbbldl (mgr.14229) 705 : cluster [DBG] pgmap v389: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:45:57.463 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:45:57.463 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (9m) 4m ago 10m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:45:57.463 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (4m) 4m ago 10m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:45:57.463 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 3m ago 10m - - 2026-04-15T13:45:57.463 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (10m) 3m ago 10m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:45:57.696 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:45:57.696 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:45:57.697 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state 2026-04-15T13:45:58.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:58 vm09 bash[34466]: audit 2026-04-15T13:45:57.244270+0000 mgr.vm06.qbbldl (mgr.14229) 706 : audit [DBG] from='client.16094 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:58.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:58 vm09 bash[34466]: audit 2026-04-15T13:45:57.244270+0000 mgr.vm06.qbbldl (mgr.14229) 706 : audit [DBG] from='client.16094 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:58.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:58 vm09 bash[34466]: audit 2026-04-15T13:45:57.692334+0000 mon.vm06 (mon.0) 1179 : audit [DBG] from='client.? 192.168.123.106:0/588081586' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:45:58.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:58 vm09 bash[34466]: audit 2026-04-15T13:45:57.692334+0000 mon.vm06 (mon.0) 1179 : audit [DBG] from='client.? 192.168.123.106:0/588081586' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:45:58.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:58 vm06 bash[28114]: audit 2026-04-15T13:45:57.244270+0000 mgr.vm06.qbbldl (mgr.14229) 706 : audit [DBG] from='client.16094 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:58.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:58 vm06 bash[28114]: audit 2026-04-15T13:45:57.244270+0000 mgr.vm06.qbbldl (mgr.14229) 706 : audit [DBG] from='client.16094 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:58.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:58 vm06 bash[28114]: audit 2026-04-15T13:45:57.692334+0000 mon.vm06 (mon.0) 1179 : audit [DBG] from='client.? 
192.168.123.106:0/588081586' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:45:58.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:58 vm06 bash[28114]: audit 2026-04-15T13:45:57.692334+0000 mon.vm06 (mon.0) 1179 : audit [DBG] from='client.? 192.168.123.106:0/588081586' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:45:59.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:59 vm06 bash[28114]: audit 2026-04-15T13:45:57.456460+0000 mgr.vm06.qbbldl (mgr.14229) 707 : audit [DBG] from='client.16098 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:59.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:59 vm06 bash[28114]: audit 2026-04-15T13:45:57.456460+0000 mgr.vm06.qbbldl (mgr.14229) 707 : audit [DBG] from='client.16098 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:59.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:59 vm06 bash[28114]: cluster 2026-04-15T13:45:57.878597+0000 mgr.vm06.qbbldl (mgr.14229) 708 : cluster [DBG] pgmap v390: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:45:59.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:45:59 vm06 bash[28114]: cluster 2026-04-15T13:45:57.878597+0000 mgr.vm06.qbbldl (mgr.14229) 708 : cluster [DBG] pgmap v390: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:45:59.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:59 vm09 bash[34466]: audit 2026-04-15T13:45:57.456460+0000 mgr.vm06.qbbldl (mgr.14229) 707 : audit [DBG] from='client.16098 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:59.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:59 vm09 bash[34466]: audit 2026-04-15T13:45:57.456460+0000 mgr.vm06.qbbldl (mgr.14229) 707 : audit [DBG] from='client.16098 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:45:59.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:59 vm09 bash[34466]: cluster 2026-04-15T13:45:57.878597+0000 mgr.vm06.qbbldl (mgr.14229) 708 : cluster [DBG] pgmap v390: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:45:59.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:45:59 vm09 bash[34466]: cluster 2026-04-15T13:45:57.878597+0000 mgr.vm06.qbbldl (mgr.14229) 708 : cluster [DBG] pgmap v390: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:46:01.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:01 vm09 bash[34466]: cluster 2026-04-15T13:45:59.879007+0000 mgr.vm06.qbbldl (mgr.14229) 709 : cluster [DBG] pgmap v391: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:46:01.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:01 vm09 bash[34466]: cluster 2026-04-15T13:45:59.879007+0000 mgr.vm06.qbbldl (mgr.14229) 709 : cluster [DBG] pgmap v391: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:46:01.517 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:01 vm06 bash[28114]: cluster 2026-04-15T13:45:59.879007+0000 mgr.vm06.qbbldl (mgr.14229) 709 : cluster [DBG] pgmap v391: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:46:01.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:01 vm06 bash[28114]: cluster 2026-04-15T13:45:59.879007+0000 mgr.vm06.qbbldl (mgr.14229) 709 : cluster [DBG] pgmap v391: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:46:02.908 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop 2026-04-15T13:46:03.087 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:46:03.087 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (9m) 4m ago 10m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:46:03.087 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (4m) 4m ago 10m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:46:03.087 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 3m ago 10m - - 2026-04-15T13:46:03.087 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (10m) 3m ago 10m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:46:03.316 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:46:03.316 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:46:03.316 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state 2026-04-15T13:46:03.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:03 vm09 bash[34466]: cluster 2026-04-15T13:46:01.879416+0000 mgr.vm06.qbbldl (mgr.14229) 710 : cluster [DBG] pgmap v392: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:46:03.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:03 vm09 bash[34466]: cluster 2026-04-15T13:46:01.879416+0000 mgr.vm06.qbbldl (mgr.14229) 710 : cluster [DBG] pgmap v392: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:46:03.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:03 vm06 bash[28114]: cluster 2026-04-15T13:46:01.879416+0000 mgr.vm06.qbbldl (mgr.14229) 710 : cluster [DBG] pgmap v392: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:46:03.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:03 vm06 bash[28114]: cluster 2026-04-15T13:46:01.879416+0000 mgr.vm06.qbbldl (mgr.14229) 710 : cluster [DBG] pgmap v392: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:46:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:04 vm09 bash[34466]: audit 2026-04-15T13:46:02.887205+0000 mgr.vm06.qbbldl (mgr.14229) 711 : audit [DBG] from='client.16106 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:04 vm09 bash[34466]: audit 2026-04-15T13:46:02.887205+0000 mgr.vm06.qbbldl (mgr.14229) 
711 : audit [DBG] from='client.16106 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:04 vm09 bash[34466]: audit 2026-04-15T13:46:03.079936+0000 mgr.vm06.qbbldl (mgr.14229) 712 : audit [DBG] from='client.16110 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:04 vm09 bash[34466]: audit 2026-04-15T13:46:03.079936+0000 mgr.vm06.qbbldl (mgr.14229) 712 : audit [DBG] from='client.16110 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:04 vm09 bash[34466]: audit 2026-04-15T13:46:03.311555+0000 mon.vm06 (mon.0) 1180 : audit [DBG] from='client.? 192.168.123.106:0/1173699983' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:46:04.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:04 vm09 bash[34466]: audit 2026-04-15T13:46:03.311555+0000 mon.vm06 (mon.0) 1180 : audit [DBG] from='client.? 192.168.123.106:0/1173699983' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:46:04.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:04 vm06 bash[28114]: audit 2026-04-15T13:46:02.887205+0000 mgr.vm06.qbbldl (mgr.14229) 711 : audit [DBG] from='client.16106 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:04.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:04 vm06 bash[28114]: audit 2026-04-15T13:46:02.887205+0000 mgr.vm06.qbbldl (mgr.14229) 711 : audit [DBG] from='client.16106 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:04.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:04 vm06 bash[28114]: audit 2026-04-15T13:46:03.079936+0000 mgr.vm06.qbbldl (mgr.14229) 712 : audit [DBG] from='client.16110 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:04.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:04 vm06 bash[28114]: audit 2026-04-15T13:46:03.079936+0000 mgr.vm06.qbbldl (mgr.14229) 712 : audit [DBG] from='client.16110 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:04.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:04 vm06 bash[28114]: audit 2026-04-15T13:46:03.311555+0000 mon.vm06 (mon.0) 1180 : audit [DBG] from='client.? 192.168.123.106:0/1173699983' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:46:04.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:04 vm06 bash[28114]: audit 2026-04-15T13:46:03.311555+0000 mon.vm06 (mon.0) 1180 : audit [DBG] from='client.? 
192.168.123.106:0/1173699983' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:46:05.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:05 vm09 bash[34466]: cluster 2026-04-15T13:46:03.879821+0000 mgr.vm06.qbbldl (mgr.14229) 713 : cluster [DBG] pgmap v393: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:46:05.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:05 vm09 bash[34466]: cluster 2026-04-15T13:46:03.879821+0000 mgr.vm06.qbbldl (mgr.14229) 713 : cluster [DBG] pgmap v393: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:46:05.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:05 vm06 bash[28114]: cluster 2026-04-15T13:46:03.879821+0000 mgr.vm06.qbbldl (mgr.14229) 713 : cluster [DBG] pgmap v393: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:46:05.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:05 vm06 bash[28114]: cluster 2026-04-15T13:46:03.879821+0000 mgr.vm06.qbbldl (mgr.14229) 713 : cluster [DBG] pgmap v393: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:46:06.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:06 vm09 bash[34466]: audit 2026-04-15T13:46:05.886780+0000 mon.vm06 (mon.0) 1181 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:46:06.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:06 vm09 bash[34466]: audit 2026-04-15T13:46:05.886780+0000 mon.vm06 (mon.0) 1181 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:46:06.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:06 vm06 bash[28114]: audit 2026-04-15T13:46:05.886780+0000 mon.vm06 (mon.0) 1181 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:46:06.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:06 vm06 bash[28114]: audit 2026-04-15T13:46:05.886780+0000 mon.vm06 (mon.0) 1181 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:46:07.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:07 vm09 bash[34466]: cluster 2026-04-15T13:46:05.880243+0000 mgr.vm06.qbbldl (mgr.14229) 714 : cluster [DBG] pgmap v394: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:46:07.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:07 vm09 bash[34466]: cluster 2026-04-15T13:46:05.880243+0000 mgr.vm06.qbbldl (mgr.14229) 714 : cluster [DBG] pgmap v394: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:46:07.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:07 vm09 bash[34466]: audit 2026-04-15T13:46:06.240538+0000 mon.vm06 (mon.0) 1182 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:46:07.359 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:07 vm09 bash[34466]: audit 2026-04-15T13:46:06.240538+0000 mon.vm06 (mon.0) 1182 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:46:07.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:07 vm09 bash[34466]: audit 2026-04-15T13:46:06.241319+0000 mon.vm06 (mon.0) 1183 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:46:07.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:07 vm09 bash[34466]: cluster 2026-04-15T13:46:06.242476+0000 mgr.vm06.qbbldl (mgr.14229) 715 : cluster [DBG] pgmap v395: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:46:07.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:07 vm09 bash[34466]: cluster 2026-04-15T13:46:06.242678+0000 mgr.vm06.qbbldl (mgr.14229) 716 : cluster [DBG] pgmap v396: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:46:07.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:07 vm09 bash[34466]: audit 2026-04-15T13:46:06.247253+0000 mon.vm06 (mon.0) 1184 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:07.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:07 vm09 bash[34466]: audit 2026-04-15T13:46:06.249090+0000 mon.vm06 (mon.0) 1185 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:46:07.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:07 vm06 bash[28114]: cluster 2026-04-15T13:46:05.880243+0000 mgr.vm06.qbbldl (mgr.14229) 714 : cluster [DBG] pgmap v394: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:46:07.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:07 vm06 bash[28114]: audit 2026-04-15T13:46:06.240538+0000 mon.vm06 (mon.0) 1182 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:46:07.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:07 vm06 bash[28114]: audit 2026-04-15T13:46:06.241319+0000 mon.vm06 (mon.0) 1183 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:46:07.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:07 vm06 bash[28114]: cluster 2026-04-15T13:46:06.242476+0000 mgr.vm06.qbbldl (mgr.14229) 715 : cluster [DBG] pgmap v395: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:46:07.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:07 vm06 bash[28114]: cluster 2026-04-15T13:46:06.242678+0000 mgr.vm06.qbbldl (mgr.14229) 716 : cluster [DBG] pgmap v396: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:46:07.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:07 vm06 bash[28114]: audit 2026-04-15T13:46:06.247253+0000 mon.vm06 (mon.0) 1184 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:07.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:07 vm06 bash[28114]: audit 2026-04-15T13:46:06.249090+0000 mon.vm06 (mon.0) 1185 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:46:08.533 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:46:08.714 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:46:08.715 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (9m) 4m ago 10m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:46:08.715 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (4m) 4m ago 10m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:46:08.715 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 4m ago 10m - -
2026-04-15T13:46:08.715 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (10m) 4m ago 10m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:46:08.953 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:46:08.953 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:46:08.953 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:46:09.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:09 vm09 bash[34466]: cluster 2026-04-15T13:46:08.243115+0000 mgr.vm06.qbbldl (mgr.14229) 717 : cluster [DBG] pgmap v397: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:46:09.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:09 vm09 bash[34466]: audit 2026-04-15T13:46:08.491481+0000 mon.vm06 (mon.0) 1186 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:46:09.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:09 vm09 bash[34466]: audit 2026-04-15T13:46:08.949342+0000 mon.vm06 (mon.0) 1187 : audit [DBG] from='client.? 192.168.123.106:0/155742736' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:46:09.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:09 vm06 bash[28114]: cluster 2026-04-15T13:46:08.243115+0000 mgr.vm06.qbbldl (mgr.14229) 717 : cluster [DBG] pgmap v397: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:46:09.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:09 vm06 bash[28114]: audit 2026-04-15T13:46:08.491481+0000 mon.vm06 (mon.0) 1186 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:46:09.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:09 vm06 bash[28114]: audit 2026-04-15T13:46:08.949342+0000 mon.vm06 (mon.0) 1187 : audit [DBG] from='client.? 192.168.123.106:0/155742736' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:46:10.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:10 vm09 bash[34466]: audit 2026-04-15T13:46:08.511039+0000 mgr.vm06.qbbldl (mgr.14229) 718 : audit [DBG] from='client.16118 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:10.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:10 vm09 bash[34466]: audit 2026-04-15T13:46:08.708018+0000 mgr.vm06.qbbldl (mgr.14229) 719 : audit [DBG] from='client.16122 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:10.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:10 vm06 bash[28114]: audit 2026-04-15T13:46:08.511039+0000 mgr.vm06.qbbldl (mgr.14229) 718 : audit [DBG] from='client.16118 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:10.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:10 vm06 bash[28114]: audit 2026-04-15T13:46:08.708018+0000 mgr.vm06.qbbldl (mgr.14229) 719 : audit [DBG] from='client.16122 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:11.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:11 vm09 bash[34466]: cluster 2026-04-15T13:46:10.243602+0000 mgr.vm06.qbbldl (mgr.14229) 720 : cluster [DBG] pgmap v398: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 244 B/s rd, 489 B/s wr, 0 op/s
2026-04-15T13:46:11.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:11 vm06 bash[28114]: cluster 2026-04-15T13:46:10.243602+0000 mgr.vm06.qbbldl (mgr.14229) 720 : cluster [DBG] pgmap v398: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 244 B/s rd, 489 B/s wr, 0 op/s
2026-04-15T13:46:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:13 vm09 bash[34466]: cluster 2026-04-15T13:46:12.243959+0000 mgr.vm06.qbbldl (mgr.14229) 721 : cluster [DBG] pgmap v399: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 244 B/s rd, 489 B/s wr, 0 op/s
2026-04-15T13:46:13.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:13 vm06 bash[28114]: cluster 2026-04-15T13:46:12.243959+0000 mgr.vm06.qbbldl (mgr.14229) 721 : cluster [DBG] pgmap v399: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 244 B/s rd, 489 B/s wr, 0 op/s
2026-04-15T13:46:14.159 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:46:14.335 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:46:14.335 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (9m) 4m ago 10m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:46:14.335 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (4m) 4m ago 10m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:46:14.335 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 4m ago 10m - -
2026-04-15T13:46:14.335 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (10m) 4m ago 10m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:46:14.573 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:46:14.574 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:46:14.574 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:46:15.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:15 vm09 bash[34466]: audit 2026-04-15T13:46:14.138590+0000 mgr.vm06.qbbldl (mgr.14229) 722 : audit [DBG] from='client.16130 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:15.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:15 vm09 bash[34466]: cluster 2026-04-15T13:46:14.244309+0000 mgr.vm06.qbbldl (mgr.14229) 723 : cluster [DBG] pgmap v400: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 244 B/s rd, 489 B/s wr, 0 op/s
2026-04-15T13:46:15.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:15 vm09 bash[34466]: audit 2026-04-15T13:46:14.327777+0000 mgr.vm06.qbbldl (mgr.14229) 724 : audit [DBG] from='client.16134 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:15.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:15 vm09 bash[34466]: audit 2026-04-15T13:46:14.569840+0000 mon.vm06 (mon.0) 1188 : audit [DBG] from='client.? 192.168.123.106:0/2551816503' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:46:15.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:15 vm06 bash[28114]: audit 2026-04-15T13:46:14.138590+0000 mgr.vm06.qbbldl (mgr.14229) 722 : audit [DBG] from='client.16130 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:15.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:15 vm06 bash[28114]: cluster 2026-04-15T13:46:14.244309+0000 mgr.vm06.qbbldl (mgr.14229) 723 : cluster [DBG] pgmap v400: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 244 B/s rd, 489 B/s wr, 0 op/s
2026-04-15T13:46:15.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:15 vm06 bash[28114]: audit 2026-04-15T13:46:14.327777+0000 mgr.vm06.qbbldl (mgr.14229) 724 : audit [DBG] from='client.16134 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:15.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:15 vm06 bash[28114]: audit 2026-04-15T13:46:14.569840+0000 mon.vm06 (mon.0) 1188 : audit [DBG] from='client.? 192.168.123.106:0/2551816503' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:46:17.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:17 vm09 bash[34466]: cluster 2026-04-15T13:46:16.244741+0000 mgr.vm06.qbbldl (mgr.14229) 725 : cluster [DBG] pgmap v401: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s
2026-04-15T13:46:17.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:17 vm06 bash[28114]: cluster 2026-04-15T13:46:16.244741+0000 mgr.vm06.qbbldl (mgr.14229) 725 : cluster [DBG] pgmap v401: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 204 B/s rd, 409 B/s wr, 0 op/s
2026-04-15T13:46:19.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:19 vm06 bash[28114]: cluster 2026-04-15T13:46:18.245112+0000 mgr.vm06.qbbldl (mgr.14229) 726 : cluster [DBG] pgmap v402: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:46:19.799 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:46:19.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:19 vm09 bash[34466]: cluster 2026-04-15T13:46:18.245112+0000 mgr.vm06.qbbldl (mgr.14229) 726 : cluster [DBG] pgmap v402: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:46:19.996 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:46:19.996 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (10m) 4m ago 10m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:46:19.996 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (4m) 4m ago 10m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:46:19.996 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 4m ago 10m - -
2026-04-15T13:46:19.997 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (10m) 4m ago 10m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:46:20.259 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:46:20.259 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:46:20.259 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:46:20.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:20 vm06 bash[28114]: audit 2026-04-15T13:46:20.255555+0000 mon.vm06 (mon.0) 1189 : audit [DBG] from='client.? 192.168.123.106:0/4233053191' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:46:20.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:20 vm09 bash[34466]: audit 2026-04-15T13:46:20.255555+0000 mon.vm06 (mon.0) 1189 : audit [DBG] from='client.? 192.168.123.106:0/4233053191' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:46:21.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:21 vm06 bash[28114]: audit 2026-04-15T13:46:19.778495+0000 mgr.vm06.qbbldl (mgr.14229) 727 : audit [DBG] from='client.16142 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:21.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:21 vm06 bash[28114]: audit 2026-04-15T13:46:19.990129+0000 mgr.vm06.qbbldl (mgr.14229) 728 : audit [DBG] from='client.16146 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:21.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:21 vm06 bash[28114]: cluster 2026-04-15T13:46:20.245637+0000 mgr.vm06.qbbldl (mgr.14229) 729 : cluster [DBG] pgmap v403: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 2.2 KiB/s rd, 341 B/s wr, 3 op/s
2026-04-15T13:46:21.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:21 vm09 bash[34466]: audit 2026-04-15T13:46:19.778495+0000 mgr.vm06.qbbldl (mgr.14229) 727 : audit [DBG] from='client.16142 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:21.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:21 vm09 bash[34466]: audit 2026-04-15T13:46:19.990129+0000 mgr.vm06.qbbldl (mgr.14229) 728 : audit [DBG] from='client.16146 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:21.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:21 vm09 bash[34466]: cluster 2026-04-15T13:46:20.245637+0000 mgr.vm06.qbbldl (mgr.14229) 729 : cluster [DBG] pgmap v403: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 2.2 KiB/s rd, 341 B/s wr, 3 op/s
2026-04-15T13:46:23.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:23 vm06 bash[28114]: cluster 2026-04-15T13:46:22.245966+0000 mgr.vm06.qbbldl (mgr.14229) 730 : cluster [DBG] pgmap v404: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.0 KiB/s rd, 0 B/s wr, 13 op/s
2026-04-15T13:46:23.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:23 vm09 bash[34466]: cluster 2026-04-15T13:46:22.245966+0000 mgr.vm06.qbbldl (mgr.14229) 730 : cluster [DBG] pgmap v404: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.0 KiB/s rd, 0 B/s wr, 13 op/s
2026-04-15T13:46:24.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:24 vm06 bash[28114]: audit 2026-04-15T13:46:23.491747+0000 mon.vm06 (mon.0) 1190 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:46:24.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:24 vm09 bash[34466]: audit 2026-04-15T13:46:23.491747+0000 mon.vm06 (mon.0) 1190 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:46:25.474 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:46:25.651 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:46:25.651 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (10m) 4m ago 10m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:46:25.651 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (4m) 4m ago 10m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:46:25.651 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 4m ago 10m - -
2026-04-15T13:46:25.651 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (10m) 4m ago 10m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:46:25.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:25 vm06 bash[28114]: cluster 2026-04-15T13:46:24.246351+0000 mgr.vm06.qbbldl (mgr.14229) 731 : cluster [DBG] pgmap v405: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 170 B/s wr, 41 op/s
2026-04-15T13:46:25.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:25 vm09 bash[34466]: cluster 2026-04-15T13:46:24.246351+0000 mgr.vm06.qbbldl (mgr.14229) 731 : cluster [DBG] pgmap v405: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 170 B/s wr, 41 op/s
2026-04-15T13:46:25.890 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:46:25.890 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:46:25.890 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:46:26.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:26 vm06 bash[28114]: audit 2026-04-15T13:46:25.886353+0000 mon.vm06 (mon.0) 1191 : audit [DBG] from='client.? 192.168.123.106:0/3581979003' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:46:26.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:26 vm09 bash[34466]: audit 2026-04-15T13:46:25.886353+0000 mon.vm06 (mon.0) 1191 : audit [DBG] from='client.? 192.168.123.106:0/3581979003' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:46:27.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:27 vm06 bash[28114]: audit 2026-04-15T13:46:25.452977+0000 mgr.vm06.qbbldl (mgr.14229) 732 : audit [DBG] from='client.16154 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:27.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:27 vm06 bash[28114]: audit 2026-04-15T13:46:25.644167+0000 mgr.vm06.qbbldl (mgr.14229) 733 : audit [DBG] from='client.16158 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:27.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:27 vm06 bash[28114]: cluster 2026-04-15T13:46:26.246780+0000 mgr.vm06.qbbldl (mgr.14229) 734 : cluster [DBG] pgmap v406: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-04-15T13:46:27.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:27 vm09 bash[34466]: audit 2026-04-15T13:46:25.452977+0000 mgr.vm06.qbbldl (mgr.14229) 732 : audit [DBG] from='client.16154 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:27.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:27 vm09 bash[34466]: audit 2026-04-15T13:46:25.644167+0000 mgr.vm06.qbbldl (mgr.14229) 733 : audit [DBG] from='client.16158 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:27.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:27 vm09 bash[34466]: cluster 2026-04-15T13:46:26.246780+0000 mgr.vm06.qbbldl (mgr.14229) 734 : cluster [DBG] pgmap v406: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-04-15T13:46:29.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:29 vm06 bash[28114]: cluster 2026-04-15T13:46:28.247274+0000 mgr.vm06.qbbldl (mgr.14229) 735 : cluster [DBG] pgmap v407: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-04-15T13:46:29.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:29 vm09 bash[34466]: cluster 2026-04-15T13:46:28.247274+0000 mgr.vm06.qbbldl (mgr.14229) 735 : cluster [DBG] pgmap v407: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-04-15T13:46:31.096 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:46:31.290 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:46:31.290 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (10m) 4m ago 11m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:46:31.290 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (4m) 4m ago 11m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:46:31.290 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 4m ago 11m - -
2026-04-15T13:46:31.290 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (11m) 4m ago 11m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:46:31.522 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:46:31.522 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:46:31.522 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state 2026-04-15T13:46:31.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:31 vm06 bash[28114]: cluster 2026-04-15T13:46:30.247885+0000 mgr.vm06.qbbldl (mgr.14229) 736 : cluster [DBG] pgmap v408: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-15T13:46:31.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:31 vm06 bash[28114]: cluster 2026-04-15T13:46:30.247885+0000 mgr.vm06.qbbldl (mgr.14229) 736 : cluster [DBG] pgmap v408: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-15T13:46:31.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:31 vm09 bash[34466]: cluster 2026-04-15T13:46:30.247885+0000 mgr.vm06.qbbldl (mgr.14229) 736 : cluster [DBG] pgmap v408: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-15T13:46:31.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:31 vm09 bash[34466]: cluster 2026-04-15T13:46:30.247885+0000 mgr.vm06.qbbldl (mgr.14229) 736 : cluster [DBG] pgmap v408: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s 2026-04-15T13:46:32.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:32 vm06 bash[28114]: audit 2026-04-15T13:46:31.075903+0000 mgr.vm06.qbbldl (mgr.14229) 737 : audit [DBG] from='client.16166 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:32.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:32 vm06 bash[28114]: audit 2026-04-15T13:46:31.075903+0000 mgr.vm06.qbbldl (mgr.14229) 737 : audit [DBG] from='client.16166 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:32.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:32 vm06 bash[28114]: audit 2026-04-15T13:46:31.283482+0000 mgr.vm06.qbbldl (mgr.14229) 738 : audit [DBG] from='client.16170 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:32.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:32 vm06 bash[28114]: audit 2026-04-15T13:46:31.283482+0000 mgr.vm06.qbbldl (mgr.14229) 738 : audit [DBG] from='client.16170 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:32.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:32 vm06 bash[28114]: audit 2026-04-15T13:46:31.518045+0000 mon.vm06 (mon.0) 1192 : audit [DBG] from='client.? 192.168.123.106:0/1236063879' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:46:32.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:32 vm06 bash[28114]: audit 2026-04-15T13:46:31.518045+0000 mon.vm06 (mon.0) 1192 : audit [DBG] from='client.? 
192.168.123.106:0/1236063879' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:46:32.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:32 vm09 bash[34466]: audit 2026-04-15T13:46:31.075903+0000 mgr.vm06.qbbldl (mgr.14229) 737 : audit [DBG] from='client.16166 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:32.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:32 vm09 bash[34466]: audit 2026-04-15T13:46:31.075903+0000 mgr.vm06.qbbldl (mgr.14229) 737 : audit [DBG] from='client.16166 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:32.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:32 vm09 bash[34466]: audit 2026-04-15T13:46:31.283482+0000 mgr.vm06.qbbldl (mgr.14229) 738 : audit [DBG] from='client.16170 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:32.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:32 vm09 bash[34466]: audit 2026-04-15T13:46:31.283482+0000 mgr.vm06.qbbldl (mgr.14229) 738 : audit [DBG] from='client.16170 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:32.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:32 vm09 bash[34466]: audit 2026-04-15T13:46:31.518045+0000 mon.vm06 (mon.0) 1192 : audit [DBG] from='client.? 192.168.123.106:0/1236063879' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:46:32.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:32 vm09 bash[34466]: audit 2026-04-15T13:46:31.518045+0000 mon.vm06 (mon.0) 1192 : audit [DBG] from='client.? 
192.168.123.106:0/1236063879' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:46:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:33 vm06 bash[28114]: cluster 2026-04-15T13:46:32.248261+0000 mgr.vm06.qbbldl (mgr.14229) 739 : cluster [DBG] pgmap v409: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 34 KiB/s rd, 170 B/s wr, 56 op/s 2026-04-15T13:46:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:33 vm06 bash[28114]: cluster 2026-04-15T13:46:32.248261+0000 mgr.vm06.qbbldl (mgr.14229) 739 : cluster [DBG] pgmap v409: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 34 KiB/s rd, 170 B/s wr, 56 op/s 2026-04-15T13:46:33.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:33 vm09 bash[34466]: cluster 2026-04-15T13:46:32.248261+0000 mgr.vm06.qbbldl (mgr.14229) 739 : cluster [DBG] pgmap v409: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 34 KiB/s rd, 170 B/s wr, 56 op/s 2026-04-15T13:46:33.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:33 vm09 bash[34466]: cluster 2026-04-15T13:46:32.248261+0000 mgr.vm06.qbbldl (mgr.14229) 739 : cluster [DBG] pgmap v409: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 34 KiB/s rd, 170 B/s wr, 56 op/s 2026-04-15T13:46:35.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:35 vm06 bash[28114]: cluster 2026-04-15T13:46:34.248762+0000 mgr.vm06.qbbldl (mgr.14229) 740 : cluster [DBG] pgmap v410: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 170 B/s wr, 46 op/s 2026-04-15T13:46:35.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:35 vm06 bash[28114]: cluster 2026-04-15T13:46:34.248762+0000 mgr.vm06.qbbldl (mgr.14229) 740 : cluster [DBG] pgmap v410: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 170 B/s wr, 46 op/s 2026-04-15T13:46:35.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:35 vm09 bash[34466]: cluster 2026-04-15T13:46:34.248762+0000 mgr.vm06.qbbldl (mgr.14229) 740 : cluster [DBG] pgmap v410: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 170 B/s wr, 46 op/s 2026-04-15T13:46:35.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:35 vm09 bash[34466]: cluster 2026-04-15T13:46:34.248762+0000 mgr.vm06.qbbldl (mgr.14229) 740 : cluster [DBG] pgmap v410: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 28 KiB/s rd, 170 B/s wr, 46 op/s 2026-04-15T13:46:36.724 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop 2026-04-15T13:46:36.900 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:46:36.900 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (10m) 4m ago 11m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:46:36.900 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (4m) 4m ago 11m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:46:36.900 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 4m ago 11m - - 2026-04-15T13:46:36.900 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (11m) 4m ago 11m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:46:37.120 
INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:46:37.120 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:46:37.120 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state 2026-04-15T13:46:37.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:37 vm06 bash[28114]: cluster 2026-04-15T13:46:36.249270+0000 mgr.vm06.qbbldl (mgr.14229) 741 : cluster [DBG] pgmap v411: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 0 B/s wr, 18 op/s 2026-04-15T13:46:37.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:37 vm06 bash[28114]: cluster 2026-04-15T13:46:36.249270+0000 mgr.vm06.qbbldl (mgr.14229) 741 : cluster [DBG] pgmap v411: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 0 B/s wr, 18 op/s 2026-04-15T13:46:37.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:37 vm06 bash[28114]: audit 2026-04-15T13:46:37.116282+0000 mon.vm06 (mon.0) 1193 : audit [DBG] from='client.? 192.168.123.106:0/1397587841' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:46:37.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:37 vm06 bash[28114]: audit 2026-04-15T13:46:37.116282+0000 mon.vm06 (mon.0) 1193 : audit [DBG] from='client.? 192.168.123.106:0/1397587841' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:46:37.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:37 vm09 bash[34466]: cluster 2026-04-15T13:46:36.249270+0000 mgr.vm06.qbbldl (mgr.14229) 741 : cluster [DBG] pgmap v411: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 0 B/s wr, 18 op/s 2026-04-15T13:46:37.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:37 vm09 bash[34466]: cluster 2026-04-15T13:46:36.249270+0000 mgr.vm06.qbbldl (mgr.14229) 741 : cluster [DBG] pgmap v411: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 0 B/s wr, 18 op/s 2026-04-15T13:46:37.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:37 vm09 bash[34466]: audit 2026-04-15T13:46:37.116282+0000 mon.vm06 (mon.0) 1193 : audit [DBG] from='client.? 192.168.123.106:0/1397587841' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:46:37.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:37 vm09 bash[34466]: audit 2026-04-15T13:46:37.116282+0000 mon.vm06 (mon.0) 1193 : audit [DBG] from='client.? 
192.168.123.106:0/1397587841' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:46:38.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:38 vm06 bash[28114]: audit 2026-04-15T13:46:36.704331+0000 mgr.vm06.qbbldl (mgr.14229) 742 : audit [DBG] from='client.16178 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:38.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:38 vm06 bash[28114]: audit 2026-04-15T13:46:36.704331+0000 mgr.vm06.qbbldl (mgr.14229) 742 : audit [DBG] from='client.16178 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:38.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:38 vm06 bash[28114]: audit 2026-04-15T13:46:36.893992+0000 mgr.vm06.qbbldl (mgr.14229) 743 : audit [DBG] from='client.16182 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:38.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:38 vm06 bash[28114]: audit 2026-04-15T13:46:36.893992+0000 mgr.vm06.qbbldl (mgr.14229) 743 : audit [DBG] from='client.16182 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:38.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:38 vm06 bash[28114]: cluster 2026-04-15T13:46:38.249772+0000 mgr.vm06.qbbldl (mgr.14229) 744 : cluster [DBG] pgmap v412: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:46:38.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:38 vm06 bash[28114]: cluster 2026-04-15T13:46:38.249772+0000 mgr.vm06.qbbldl (mgr.14229) 744 : cluster [DBG] pgmap v412: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:46:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:38 vm09 bash[34466]: audit 2026-04-15T13:46:36.704331+0000 mgr.vm06.qbbldl (mgr.14229) 742 : audit [DBG] from='client.16178 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:38 vm09 bash[34466]: audit 2026-04-15T13:46:36.704331+0000 mgr.vm06.qbbldl (mgr.14229) 742 : audit [DBG] from='client.16178 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:38 vm09 bash[34466]: audit 2026-04-15T13:46:36.893992+0000 mgr.vm06.qbbldl (mgr.14229) 743 : audit [DBG] from='client.16182 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:38 vm09 bash[34466]: audit 2026-04-15T13:46:36.893992+0000 mgr.vm06.qbbldl (mgr.14229) 743 : audit [DBG] from='client.16182 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:38 vm09 bash[34466]: cluster 2026-04-15T13:46:38.249772+0000 mgr.vm06.qbbldl (mgr.14229) 744 : cluster [DBG] pgmap v412: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:46:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:38 vm09 bash[34466]: cluster 2026-04-15T13:46:38.249772+0000 mgr.vm06.qbbldl (mgr.14229) 744 : 
cluster [DBG] pgmap v412: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:46:39.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:39 vm06 bash[28114]: audit 2026-04-15T13:46:38.491877+0000 mon.vm06 (mon.0) 1194 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:46:39.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:39 vm06 bash[28114]: audit 2026-04-15T13:46:38.491877+0000 mon.vm06 (mon.0) 1194 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:46:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:39 vm09 bash[34466]: audit 2026-04-15T13:46:38.491877+0000 mon.vm06 (mon.0) 1194 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:46:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:39 vm09 bash[34466]: audit 2026-04-15T13:46:38.491877+0000 mon.vm06 (mon.0) 1194 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:46:40.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:40 vm06 bash[28114]: cluster 2026-04-15T13:46:40.250262+0000 mgr.vm06.qbbldl (mgr.14229) 745 : cluster [DBG] pgmap v413: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:46:40.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:40 vm06 bash[28114]: cluster 2026-04-15T13:46:40.250262+0000 mgr.vm06.qbbldl (mgr.14229) 745 : cluster [DBG] pgmap v413: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:46:40.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:40 vm09 bash[34466]: cluster 2026-04-15T13:46:40.250262+0000 mgr.vm06.qbbldl (mgr.14229) 745 : cluster [DBG] pgmap v413: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:46:40.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:40 vm09 bash[34466]: cluster 2026-04-15T13:46:40.250262+0000 mgr.vm06.qbbldl (mgr.14229) 745 : cluster [DBG] pgmap v413: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:46:42.327 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop 2026-04-15T13:46:42.512 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:46:42.512 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (10m) 4m ago 11m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:46:42.512 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (5m) 4m ago 11m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:46:42.512 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 4m ago 11m - - 2026-04-15T13:46:42.512 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (11m) 4m ago 11m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96 2026-04-15T13:46:42.739 
INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:46:42.739 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:46:42.739 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state 2026-04-15T13:46:43.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:43 vm09 bash[34466]: cluster 2026-04-15T13:46:42.250669+0000 mgr.vm06.qbbldl (mgr.14229) 746 : cluster [DBG] pgmap v414: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:46:43.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:43 vm09 bash[34466]: cluster 2026-04-15T13:46:42.250669+0000 mgr.vm06.qbbldl (mgr.14229) 746 : cluster [DBG] pgmap v414: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:46:43.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:43 vm09 bash[34466]: audit 2026-04-15T13:46:42.309848+0000 mgr.vm06.qbbldl (mgr.14229) 747 : audit [DBG] from='client.16190 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:43.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:43 vm09 bash[34466]: audit 2026-04-15T13:46:42.309848+0000 mgr.vm06.qbbldl (mgr.14229) 747 : audit [DBG] from='client.16190 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:43.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:43 vm09 bash[34466]: audit 2026-04-15T13:46:42.734957+0000 mon.vm06 (mon.0) 1195 : audit [DBG] from='client.? 192.168.123.106:0/249851663' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:46:43.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:43 vm09 bash[34466]: audit 2026-04-15T13:46:42.734957+0000 mon.vm06 (mon.0) 1195 : audit [DBG] from='client.? 192.168.123.106:0/249851663' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:46:43.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:43 vm06 bash[28114]: cluster 2026-04-15T13:46:42.250669+0000 mgr.vm06.qbbldl (mgr.14229) 746 : cluster [DBG] pgmap v414: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:46:43.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:43 vm06 bash[28114]: cluster 2026-04-15T13:46:42.250669+0000 mgr.vm06.qbbldl (mgr.14229) 746 : cluster [DBG] pgmap v414: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:46:43.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:43 vm06 bash[28114]: audit 2026-04-15T13:46:42.309848+0000 mgr.vm06.qbbldl (mgr.14229) 747 : audit [DBG] from='client.16190 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:43.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:43 vm06 bash[28114]: audit 2026-04-15T13:46:42.309848+0000 mgr.vm06.qbbldl (mgr.14229) 747 : audit [DBG] from='client.16190 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:43.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:43 vm06 bash[28114]: audit 2026-04-15T13:46:42.734957+0000 mon.vm06 (mon.0) 1195 : audit [DBG] from='client.? 
2026-04-15T13:46:44.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:44 vm09 bash[34466]: audit 2026-04-15T13:46:42.505358+0000 mgr.vm06.qbbldl (mgr.14229) 748 : audit [DBG] from='client.16194 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:44.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:44 vm06 bash[28114]: audit 2026-04-15T13:46:42.505358+0000 mgr.vm06.qbbldl (mgr.14229) 748 : audit [DBG] from='client.16194 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:45.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:45 vm06 bash[28114]: cluster 2026-04-15T13:46:44.251097+0000 mgr.vm06.qbbldl (mgr.14229) 749 : cluster [DBG] pgmap v415: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:46:45.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:45 vm09 bash[34466]: cluster 2026-04-15T13:46:44.251097+0000 mgr.vm06.qbbldl (mgr.14229) 749 : cluster [DBG] pgmap v415: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:46:47.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:47 vm06 bash[28114]: cluster 2026-04-15T13:46:46.251643+0000 mgr.vm06.qbbldl (mgr.14229) 750 : cluster [DBG] pgmap v416: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:46:47.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:47 vm09 bash[34466]: cluster 2026-04-15T13:46:46.251643+0000 mgr.vm06.qbbldl (mgr.14229) 750 : cluster [DBG] pgmap v416: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:46:47.949 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to stop
2026-04-15T13:46:48.138 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:46:48.139 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (10m) 5m ago 11m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:46:48.139 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (5m) 5m ago 11m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:46:48.139 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 4m ago 11m - -
2026-04-15T13:46:48.139 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (11m) 4m ago 11m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:46:48.383 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:46:48.384 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:46:48.384 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
2026-04-15T13:46:48.779 INFO:teuthology.orchestra.run.vm06.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-15T13:46:48.779 INFO:teuthology.orchestra.run.vm06.stderr: Dload Upload Total Spent Left Speed
2026-04-15T13:46:48.779 INFO:teuthology.orchestra.run.vm06.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k
2026-04-15T13:46:48.965 INFO:teuthology.orchestra.run.vm06.stdout:anonymousScheduled to start rgw.foo.vm09.iwshxg on host 'vm09'
2026-04-15T13:46:49.176 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.iwshxg to start
2026-04-15T13:46:49.355 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:46:49.355 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (10m) 5m ago 11m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:46:49.355 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (5m) 5m ago 11m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:46:49.355 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 error 4m ago 11m - -
2026-04-15T13:46:49.355 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (11m) 4m ago 11m 122M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:46:49.595 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:46:49.596 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:46:49.596 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.iwshxg on vm09 is in error state
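
The curl transfer meter on stderr and the 187-byte body ending in "anonymous" above are an availability probe: with one rgw daemon down, the test curls the service endpoint until it answers, proving the surviving daemons still serve requests. A sketch of that probe under assumed values (the URL is a placeholder; the test substitutes its own address and port):

    url="http://192.0.2.10:9000/"   # hypothetical; the test uses its own endpoint
    SECONDS=0
    until curl "$url"; do           # a plain curl produces the progress meter above
        [ "$SECONDS" -lt 300 ] || { echo "timed out waiting for $url"; exit 1; }
        sleep 1
    done

The "anonymousScheduled to start ..." run-on is apparently because the response body curl prints ends without a trailing newline, so the next command's "Scheduled to start rgw.foo.vm09.iwshxg on host 'vm09'" lands on the same log line.
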
2026-04-15T13:46:49.628 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:49 vm09 bash[34466]: audit 2026-04-15T13:46:47.930533+0000 mgr.vm06.qbbldl (mgr.14229) 751 : audit [DBG] from='client.16202 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:49.628 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:49 vm09 bash[34466]: audit 2026-04-15T13:46:48.132091+0000 mgr.vm06.qbbldl (mgr.14229) 752 : audit [DBG] from='client.16206 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:49.628 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:49 vm09 bash[34466]: cluster 2026-04-15T13:46:48.252138+0000 mgr.vm06.qbbldl (mgr.14229) 753 : cluster [DBG] pgmap v417: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:46:49.628 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:49 vm09 bash[34466]: audit 2026-04-15T13:46:48.379747+0000 mon.vm06 (mon.0) 1196 : audit [DBG] from='client.? 192.168.123.106:0/1918481988' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:46:49.628 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:49 vm09 bash[34466]: audit 2026-04-15T13:46:48.957093+0000 mon.vm06 (mon.0) 1197 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:49.628 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:49 vm09 bash[34466]: audit 2026-04-15T13:46:48.961326+0000 mon.vm06 (mon.0) 1198 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:49.628 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:49 vm09 bash[34466]: audit 2026-04-15T13:46:48.962007+0000 mon.vm06 (mon.0) 1199 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:46:49.628 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:49 vm09 bash[34466]: audit 2026-04-15T13:46:48.963162+0000 mon.vm06 (mon.0) 1200 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:46:49.629 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:49 vm09 bash[34466]: audit 2026-04-15T13:46:48.963588+0000 mon.vm06 (mon.0) 1201 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:46:49.629 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:49 vm09 bash[34466]: audit 2026-04-15T13:46:48.967797+0000 mon.vm06 (mon.0) 1202 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:49.629 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:49 vm09 bash[34466]: audit 2026-04-15T13:46:48.969156+0000 mon.vm06 (mon.0) 1203 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:46:49.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:49 vm06 bash[28114]: audit 2026-04-15T13:46:47.930533+0000 mgr.vm06.qbbldl (mgr.14229) 751 : audit [DBG] from='client.16202 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:49.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:49 vm06 bash[28114]: audit 2026-04-15T13:46:48.132091+0000 mgr.vm06.qbbldl (mgr.14229) 752 : audit [DBG] from='client.16206 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:49.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:49 vm06 bash[28114]: cluster 2026-04-15T13:46:48.252138+0000 mgr.vm06.qbbldl (mgr.14229) 753 : cluster [DBG] pgmap v417: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:46:49.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:49 vm06 bash[28114]: audit 2026-04-15T13:46:48.379747+0000 mon.vm06 (mon.0) 1196 : audit [DBG] from='client.? 192.168.123.106:0/1918481988' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:46:49.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:49 vm06 bash[28114]: audit 2026-04-15T13:46:48.957093+0000 mon.vm06 (mon.0) 1197 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:49.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:49 vm06 bash[28114]: audit 2026-04-15T13:46:48.961326+0000 mon.vm06 (mon.0) 1198 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:49.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:49 vm06 bash[28114]: audit 2026-04-15T13:46:48.962007+0000 mon.vm06 (mon.0) 1199 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:46:49.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:49 vm06 bash[28114]: audit 2026-04-15T13:46:48.963162+0000 mon.vm06 (mon.0) 1200 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:46:49.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:49 vm06 bash[28114]: audit 2026-04-15T13:46:48.963588+0000 mon.vm06 (mon.0) 1201 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:46:49.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:49 vm06 bash[28114]: audit 2026-04-15T13:46:48.967797+0000 mon.vm06 (mon.0) 1202 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:49.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:49 vm06 bash[28114]: audit 2026-04-15T13:46:48.969156+0000 mon.vm06 (mon.0) 1203 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:46:50.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:50 vm06 bash[28114]: audit 2026-04-15T13:46:48.950691+0000 mgr.vm06.qbbldl (mgr.14229) 754 : audit [DBG] from='client.16214 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm09.iwshxg", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:50.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:50 vm06 bash[28114]: cephadm 2026-04-15T13:46:48.951119+0000 mgr.vm06.qbbldl (mgr.14229) 755 : cephadm [INF] Schedule start daemon rgw.foo.vm09.iwshxg
2026-04-15T13:46:50.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:50 vm06 bash[28114]: cluster 2026-04-15T13:46:48.964458+0000 mgr.vm06.qbbldl (mgr.14229) 756 : cluster [DBG] pgmap v418: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 191 B/s rd, 382 B/s wr, 0 op/s
2026-04-15T13:46:50.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:50 vm06 bash[28114]: cluster 2026-04-15T13:46:48.964593+0000 mgr.vm06.qbbldl (mgr.14229) 757 : cluster [DBG] pgmap v419: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:46:50.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:50 vm06 bash[28114]: audit 2026-04-15T13:46:49.156656+0000 mgr.vm06.qbbldl (mgr.14229) 758 : audit [DBG] from='client.16218 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:50.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:50 vm06 bash[28114]: audit 2026-04-15T13:46:49.348679+0000 mgr.vm06.qbbldl (mgr.14229) 759 : audit [DBG] from='client.16222 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:50.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:50 vm06 bash[28114]: audit 2026-04-15T13:46:49.542602+0000 mon.vm06 (mon.0) 1204 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:50.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:50 vm06 bash[28114]: audit 2026-04-15T13:46:49.547400+0000 mon.vm06 (mon.0) 1205 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:50.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:50 vm06 bash[28114]: audit 2026-04-15T13:46:49.548420+0000 mon.vm06 (mon.0) 1206 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:46:50.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:50 vm06 bash[28114]: audit 2026-04-15T13:46:49.591740+0000 mon.vm06 (mon.0) 1207 : audit [DBG] from='client.? 192.168.123.106:0/2621081041' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:46:50.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:50 vm09 bash[34466]: audit 2026-04-15T13:46:48.950691+0000 mgr.vm06.qbbldl (mgr.14229) 754 : audit [DBG] from='client.16214 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm09.iwshxg", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:50.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:50 vm09 bash[34466]: cephadm 2026-04-15T13:46:48.951119+0000 mgr.vm06.qbbldl (mgr.14229) 755 : cephadm [INF] Schedule start daemon rgw.foo.vm09.iwshxg
2026-04-15T13:46:50.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:50 vm09 bash[34466]: cluster 2026-04-15T13:46:48.964458+0000 mgr.vm06.qbbldl (mgr.14229) 756 : cluster [DBG] pgmap v418: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 191 B/s rd, 382 B/s wr, 0 op/s
2026-04-15T13:46:50.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:50 vm09 bash[34466]: cluster 2026-04-15T13:46:48.964593+0000 mgr.vm06.qbbldl (mgr.14229) 757 : cluster [DBG] pgmap v419: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:46:50.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:50 vm09 bash[34466]: audit 2026-04-15T13:46:49.156656+0000 mgr.vm06.qbbldl (mgr.14229) 758 : audit [DBG] from='client.16218 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:50.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:50 vm09 bash[34466]: audit 2026-04-15T13:46:49.348679+0000 mgr.vm06.qbbldl (mgr.14229) 759 : audit [DBG] from='client.16222 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:50.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:50 vm09 bash[34466]: audit 2026-04-15T13:46:49.542602+0000 mon.vm06 (mon.0) 1204 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:50.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:50 vm09 bash[34466]: audit 2026-04-15T13:46:49.547400+0000 mon.vm06 (mon.0) 1205 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:50.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:50 vm09 bash[34466]: audit 2026-04-15T13:46:49.548420+0000 mon.vm06 (mon.0) 1206 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:46:50.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:50 vm09 bash[34466]: audit 2026-04-15T13:46:49.591740+0000 mon.vm06 (mon.0) 1207 : audit [DBG] from='client.? 192.168.123.106:0/2621081041' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
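
The "Schedule start daemon rgw.foo.vm09.iwshxg" cephadm entries above show that "ceph orch daemon start" is asynchronous: the mgr only queues the action, and the daemon actually comes back in a later serve cycle (it reports "running (5s)" further down). The test therefore polls for the running state the same way it polled for the stop; a sketch under the same assumptions as the earlier one:

    daemon=rgw.foo.vm09.iwshxg
    ceph orch daemon start "$daemon"   # returns after scheduling, not after the start
    SECONDS=0
    while ! ceph orch ps | grep "$daemon" | grep -q running; do
        [ "$SECONDS" -lt 300 ] || { echo "timed out waiting for $daemon"; exit 1; }
        echo "Waiting for $daemon to start"
        ceph orch ps --daemon-type rgw
        ceph health detail
        sleep 5
    done
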
2026-04-15T13:46:52.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:52 vm06 bash[28114]: cluster 2026-04-15T13:46:50.965147+0000 mgr.vm06.qbbldl (mgr.14229) 760 : cluster [DBG] pgmap v420: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 2.4 KiB/s rd, 0 B/s wr, 2 op/s
2026-04-15T13:46:52.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:52 vm09 bash[34466]: cluster 2026-04-15T13:46:50.965147+0000 mgr.vm06.qbbldl (mgr.14229) 760 : cluster [DBG] pgmap v420: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 2.4 KiB/s rd, 0 B/s wr, 2 op/s
2026-04-15T13:46:54.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:54 vm06 bash[28114]: cluster 2026-04-15T13:46:52.965501+0000 mgr.vm06.qbbldl (mgr.14229) 761 : cluster [DBG] pgmap v421: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 26 op/s
2026-04-15T13:46:54.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:54 vm06 bash[28114]: audit 2026-04-15T13:46:53.492294+0000 mon.vm06 (mon.0) 1208 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:46:54.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:54 vm06 bash[28114]: audit 2026-04-15T13:46:54.338767+0000 mon.vm06 (mon.0) 1209 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:54.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:54 vm06 bash[28114]: audit 2026-04-15T13:46:54.343513+0000 mon.vm06 (mon.0) 1210 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:54.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:54 vm06 bash[28114]: audit 2026-04-15T13:46:54.344223+0000 mon.vm06 (mon.0) 1211 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:46:54.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:54 vm06 bash[28114]: audit 2026-04-15T13:46:54.344639+0000 mon.vm06 (mon.0) 1212 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:46:54.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:54 vm06 bash[28114]: audit 2026-04-15T13:46:54.348251+0000 mon.vm06 (mon.0) 1213 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:54.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:54 vm06 bash[28114]: audit 2026-04-15T13:46:54.349581+0000 mon.vm06 (mon.0) 1214 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:46:54.791 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (5s) 0s ago 11m 92.3M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:46:54.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:54 vm09 bash[34466]: cluster 2026-04-15T13:46:52.965501+0000 mgr.vm06.qbbldl (mgr.14229) 761 : cluster [DBG] pgmap v421: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 26 op/s
2026-04-15T13:46:54.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:54 vm09 bash[34466]: audit 2026-04-15T13:46:53.492294+0000 mon.vm06 (mon.0) 1208 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:46:54.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:54 vm09 bash[34466]: audit 2026-04-15T13:46:54.338767+0000 mon.vm06 (mon.0) 1209 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:54.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:54 vm09 bash[34466]: audit 2026-04-15T13:46:54.343513+0000 mon.vm06 (mon.0) 1210 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:54.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:54 vm09 bash[34466]: audit 2026-04-15T13:46:54.344223+0000 mon.vm06 (mon.0) 1211 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:46:54.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:54 vm09 bash[34466]: audit 2026-04-15T13:46:54.344639+0000 mon.vm06 (mon.0) 1212 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:46:54.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:54 vm09 bash[34466]: audit 2026-04-15T13:46:54.348251+0000 mon.vm06 (mon.0) 1213 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:54.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:54 vm09 bash[34466]: audit 2026-04-15T13:46:54.349581+0000 mon.vm06 (mon.0) 1214 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:46:54.978 INFO:teuthology.orchestra.run.vm06.stdout:Scheduled to stop rgw.foo.vm09.pxnsqu on host 'vm09'
2026-04-15T13:46:55.185 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:46:55.355 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:46:55.355 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (10m) 5m ago 11m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:46:55.355 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (5m) 5m ago 11m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:46:55.355 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (5s) 1s ago 11m 92.3M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:46:55.355 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (11m) 1s ago 11m 130M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:46:55.583 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK
2026-04-15T13:46:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:55 vm09 bash[34466]: cluster 2026-04-15T13:46:54.345647+0000 mgr.vm06.qbbldl (mgr.14229) 762 : cluster [DBG] pgmap v422: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 65 KiB/s rd, 253 B/s wr, 105 op/s
2026-04-15T13:46:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:55 vm09 bash[34466]: audit 2026-04-15T13:46:54.969233+0000 mon.vm06 (mon.0) 1215 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:55 vm09 bash[34466]: audit 2026-04-15T13:46:54.973508+0000 mon.vm06 (mon.0) 1216 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:55 vm09 bash[34466]: audit 2026-04-15T13:46:54.974411+0000 mon.vm06 (mon.0) 1217 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:46:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:55 vm09 bash[34466]: audit 2026-04-15T13:46:54.975511+0000 mon.vm06 (mon.0) 1218 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:46:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:55 vm09 bash[34466]: audit 2026-04-15T13:46:54.975961+0000 mon.vm06 (mon.0) 1219 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:46:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:55 vm09 bash[34466]: audit 2026-04-15T13:46:54.979241+0000 mon.vm06 (mon.0) 1220 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:55 vm09 bash[34466]: audit 2026-04-15T13:46:54.980492+0000 mon.vm06 (mon.0) 1221 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:46:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:55 vm09 bash[34466]: cluster 2026-04-15T13:46:55.342551+0000 mon.vm06 (mon.0) 1222 : cluster [INF] Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-04-15T13:46:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:55 vm09 bash[34466]: cluster 2026-04-15T13:46:55.342574+0000 mon.vm06 (mon.0) 1223 : cluster [INF] Cluster is now healthy
2026-04-15T13:46:55.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:55 vm06 bash[28114]: cluster 2026-04-15T13:46:54.345647+0000 mgr.vm06.qbbldl (mgr.14229) 762 : cluster [DBG] pgmap v422: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 65 KiB/s rd, 253 B/s wr, 105 op/s
2026-04-15T13:46:55.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:55 vm06 bash[28114]: audit 2026-04-15T13:46:54.969233+0000 mon.vm06 (mon.0) 1215 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:55.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:55 vm06 bash[28114]: audit 2026-04-15T13:46:54.973508+0000 mon.vm06 (mon.0) 1216 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:55.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:55 vm06 bash[28114]: audit 2026-04-15T13:46:54.974411+0000 mon.vm06 (mon.0) 1217 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:46:55.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:55 vm06 bash[28114]: audit 2026-04-15T13:46:54.975511+0000 mon.vm06 (mon.0) 1218 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:46:55.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:55 vm06 bash[28114]: audit 2026-04-15T13:46:54.975961+0000 mon.vm06 (mon.0) 1219 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:46:55.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:55 vm06 bash[28114]: audit 2026-04-15T13:46:54.979241+0000 mon.vm06 (mon.0) 1220 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:46:55.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:55 vm06 bash[28114]: audit 2026-04-15T13:46:54.980492+0000 mon.vm06 (mon.0) 1221 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:46:55.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:55 vm06 bash[28114]: cluster 2026-04-15T13:46:55.342551+0000 mon.vm06 (mon.0) 1222 : cluster [INF] Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-04-15T13:46:55.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:55 vm06 bash[28114]: cluster 2026-04-15T13:46:55.342574+0000 mon.vm06 (mon.0) 1223 : cluster [INF] Cluster is now healthy
2026-04-15T13:46:56.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:56 vm06 bash[28114]: audit 2026-04-15T13:46:54.770687+0000 mgr.vm06.qbbldl (mgr.14229) 763 : audit [DBG] from='client.16242 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:56.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:56 vm06 bash[28114]: audit 2026-04-15T13:46:54.963011+0000 mgr.vm06.qbbldl (mgr.14229) 764 : audit [DBG] from='client.16246 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm09.pxnsqu", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:46:56.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:56 vm06 bash[28114]: cephadm 2026-04-15T13:46:54.963442+0000 mgr.vm06.qbbldl (mgr.14229) 765 : cephadm [INF] Schedule stop daemon rgw.foo.vm09.pxnsqu
2026-04-15T13:46:56.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:56 vm06 bash[28114]: audit 2026-04-15T13:46:55.165784+0000 mgr.vm06.qbbldl (mgr.14229) 766 : audit [DBG] from='client.16250 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
from='client.16250 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:56.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:56 vm06 bash[28114]: audit 2026-04-15T13:46:55.165784+0000 mgr.vm06.qbbldl (mgr.14229) 766 : audit [DBG] from='client.16250 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:56.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:56 vm06 bash[28114]: audit 2026-04-15T13:46:55.348293+0000 mgr.vm06.qbbldl (mgr.14229) 767 : audit [DBG] from='client.16254 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:56.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:56 vm06 bash[28114]: audit 2026-04-15T13:46:55.348293+0000 mgr.vm06.qbbldl (mgr.14229) 767 : audit [DBG] from='client.16254 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:56.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:56 vm06 bash[28114]: audit 2026-04-15T13:46:55.579225+0000 mon.vm06 (mon.0) 1224 : audit [DBG] from='client.? 192.168.123.106:0/2974360385' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:46:56.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:56 vm06 bash[28114]: audit 2026-04-15T13:46:55.579225+0000 mon.vm06 (mon.0) 1224 : audit [DBG] from='client.? 192.168.123.106:0/2974360385' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:46:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:56 vm09 bash[34466]: audit 2026-04-15T13:46:54.770687+0000 mgr.vm06.qbbldl (mgr.14229) 763 : audit [DBG] from='client.16242 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:56 vm09 bash[34466]: audit 2026-04-15T13:46:54.770687+0000 mgr.vm06.qbbldl (mgr.14229) 763 : audit [DBG] from='client.16242 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:56 vm09 bash[34466]: audit 2026-04-15T13:46:54.963011+0000 mgr.vm06.qbbldl (mgr.14229) 764 : audit [DBG] from='client.16246 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm09.pxnsqu", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:56 vm09 bash[34466]: audit 2026-04-15T13:46:54.963011+0000 mgr.vm06.qbbldl (mgr.14229) 764 : audit [DBG] from='client.16246 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "rgw.foo.vm09.pxnsqu", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:56 vm09 bash[34466]: cephadm 2026-04-15T13:46:54.963442+0000 mgr.vm06.qbbldl (mgr.14229) 765 : cephadm [INF] Schedule stop daemon rgw.foo.vm09.pxnsqu 2026-04-15T13:46:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:56 vm09 bash[34466]: cephadm 2026-04-15T13:46:54.963442+0000 mgr.vm06.qbbldl (mgr.14229) 765 : cephadm [INF] Schedule stop daemon rgw.foo.vm09.pxnsqu 2026-04-15T13:46:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:56 vm09 bash[34466]: audit 2026-04-15T13:46:55.165784+0000 mgr.vm06.qbbldl (mgr.14229) 766 : audit [DBG] 
from='client.16250 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:56 vm09 bash[34466]: audit 2026-04-15T13:46:55.165784+0000 mgr.vm06.qbbldl (mgr.14229) 766 : audit [DBG] from='client.16250 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:56 vm09 bash[34466]: audit 2026-04-15T13:46:55.348293+0000 mgr.vm06.qbbldl (mgr.14229) 767 : audit [DBG] from='client.16254 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:56 vm09 bash[34466]: audit 2026-04-15T13:46:55.348293+0000 mgr.vm06.qbbldl (mgr.14229) 767 : audit [DBG] from='client.16254 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:46:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:56 vm09 bash[34466]: audit 2026-04-15T13:46:55.579225+0000 mon.vm06 (mon.0) 1224 : audit [DBG] from='client.? 192.168.123.106:0/2974360385' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:46:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:56 vm09 bash[34466]: audit 2026-04-15T13:46:55.579225+0000 mon.vm06 (mon.0) 1224 : audit [DBG] from='client.? 192.168.123.106:0/2974360385' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:46:57.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:57 vm06 bash[28114]: cluster 2026-04-15T13:46:56.346033+0000 mgr.vm06.qbbldl (mgr.14229) 768 : cluster [DBG] pgmap v423: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 99 KiB/s rd, 253 B/s wr, 161 op/s 2026-04-15T13:46:57.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:57 vm06 bash[28114]: cluster 2026-04-15T13:46:56.346033+0000 mgr.vm06.qbbldl (mgr.14229) 768 : cluster [DBG] pgmap v423: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 99 KiB/s rd, 253 B/s wr, 161 op/s 2026-04-15T13:46:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:57 vm09 bash[34466]: cluster 2026-04-15T13:46:56.346033+0000 mgr.vm06.qbbldl (mgr.14229) 768 : cluster [DBG] pgmap v423: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 99 KiB/s rd, 253 B/s wr, 161 op/s 2026-04-15T13:46:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:46:57 vm09 bash[34466]: cluster 2026-04-15T13:46:56.346033+0000 mgr.vm06.qbbldl (mgr.14229) 768 : cluster [DBG] pgmap v423: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 99 KiB/s rd, 253 B/s wr, 161 op/s 2026-04-15T13:46:59.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:59 vm06 bash[28114]: cluster 2026-04-15T13:46:58.346441+0000 mgr.vm06.qbbldl (mgr.14229) 769 : cluster [DBG] pgmap v424: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 KiB/s rd, 218 B/s wr, 139 op/s 2026-04-15T13:46:59.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:46:59 vm06 bash[28114]: cluster 2026-04-15T13:46:58.346441+0000 mgr.vm06.qbbldl (mgr.14229) 769 : cluster [DBG] pgmap v424: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 KiB/s rd, 218 B/s wr, 139 op/s 2026-04-15T13:46:59.859 
2026-04-15T13:47:00.786 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:47:00.966 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:47:00.966 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (10m) 5m ago 11m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:47:00.966 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (5m) 5m ago 11m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:47:00.966 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (11s) 6s ago 11m 92.3M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:47:00.966 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (11m) 6s ago 11m 130M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:47:01.195 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK
2026-04-15T13:47:01.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:01 vm06 bash[28114]: cluster 2026-04-15T13:47:00.346922+0000 mgr.vm06.qbbldl (mgr.14229) 770 : cluster [DBG] pgmap v425: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 70 KiB/s rd, 359 B/s wr, 115 op/s
2026-04-15T13:47:01.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:01 vm06 bash[28114]: audit 2026-04-15T13:47:01.191271+0000 mon.vm06 (mon.0) 1225 : audit [DBG] from='client.? 192.168.123.106:0/2603891695' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:47:01.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:01 vm09 bash[34466]: cluster 2026-04-15T13:47:00.346922+0000 mgr.vm06.qbbldl (mgr.14229) 770 : cluster [DBG] pgmap v425: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 70 KiB/s rd, 359 B/s wr, 115 op/s
2026-04-15T13:47:01.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:01 vm09 bash[34466]: audit 2026-04-15T13:47:01.191271+0000 mon.vm06 (mon.0) 1225 : audit [DBG] from='client.? 192.168.123.106:0/2603891695' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:47:02.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:02 vm06 bash[28114]: audit 2026-04-15T13:47:00.765425+0000 mgr.vm06.qbbldl (mgr.14229) 771 : audit [DBG] from='client.16262 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:47:02.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:02 vm06 bash[28114]: audit 2026-04-15T13:47:00.959977+0000 mgr.vm06.qbbldl (mgr.14229) 772 : audit [DBG] from='client.16266 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:47:02.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:02 vm06 bash[28114]: cluster 2026-04-15T13:47:02.347345+0000 mgr.vm06.qbbldl (mgr.14229) 773 : cluster [DBG] pgmap v426: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 68 KiB/s rd, 359 B/s wr, 113 op/s
2026-04-15T13:47:02.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:02 vm09 bash[34466]: audit 2026-04-15T13:47:00.765425+0000 mgr.vm06.qbbldl (mgr.14229) 771 : audit [DBG] from='client.16262 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:47:02.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:02 vm09 bash[34466]: audit 2026-04-15T13:47:00.959977+0000 mgr.vm06.qbbldl (mgr.14229) 772 : audit [DBG] from='client.16266 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:47:02.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:02 vm09 bash[34466]: cluster 2026-04-15T13:47:02.347345+0000 mgr.vm06.qbbldl (mgr.14229) 773 : cluster [DBG] pgmap v426: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 68 KiB/s rd, 359 B/s wr, 113 op/s
2026-04-15T13:47:05.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:05 vm09 bash[34466]: cluster 2026-04-15T13:47:04.347753+0000 mgr.vm06.qbbldl (mgr.14229) 774 : cluster [DBG] pgmap v427: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 57 KiB/s rd, 359 B/s wr, 94 op/s
2026-04-15T13:47:05.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:05 vm09 bash[34466]: audit 2026-04-15T13:47:05.390548+0000 mon.vm06 (mon.0) 1226 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:47:05.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:05 vm09 bash[34466]: audit 2026-04-15T13:47:05.394382+0000 mon.vm06 (mon.0) 1227 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:47:05.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:05 vm09 bash[34466]: audit 2026-04-15T13:47:05.395542+0000 mon.vm06 (mon.0) 1228 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:47:05.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:05 vm06 bash[28114]: cluster 2026-04-15T13:47:04.347753+0000 mgr.vm06.qbbldl (mgr.14229) 774 : cluster [DBG] pgmap v427: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 57 KiB/s rd, 359 B/s wr, 94 op/s
2026-04-15T13:47:05.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:05 vm06 bash[28114]: audit 2026-04-15T13:47:05.390548+0000 mon.vm06 (mon.0) 1226 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:47:05.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:05 vm06 bash[28114]: audit 2026-04-15T13:47:05.394382+0000 mon.vm06 (mon.0) 1227 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:47:05.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:05 vm06 bash[28114]: audit 2026-04-15T13:47:05.395542+0000 mon.vm06 (mon.0) 1228 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:47:06.389 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:47:06.568 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:47:06.568 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (10m) 5m ago 11m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:47:06.568 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (5m) 5m ago 11m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:47:06.568 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (17s) 12s ago 11m 92.3M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:47:06.568 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (11m) 12s ago 11m 130M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b26b2a80bc96
2026-04-15T13:47:06.787 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK
2026-04-15T13:47:07.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:07 vm06 bash[28114]: cluster 2026-04-15T13:47:06.348170+0000 mgr.vm06.qbbldl (mgr.14229) 775 : cluster [DBG] pgmap v428: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 170 B/s wr, 38 op/s
2026-04-15T13:47:07.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:07 vm06 bash[28114]: audit 2026-04-15T13:47:06.370155+0000 mgr.vm06.qbbldl (mgr.14229) 776 : audit [DBG] from='client.16274 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:47:07.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:07 vm06 bash[28114]: audit 2026-04-15T13:47:06.783531+0000 mon.vm06 (mon.0) 1229 : audit [DBG] from='client.? 192.168.123.106:0/2186856830' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:47:07.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:07 vm09 bash[34466]: cluster 2026-04-15T13:47:06.348170+0000 mgr.vm06.qbbldl (mgr.14229) 775 : cluster [DBG] pgmap v428: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 23 KiB/s rd, 170 B/s wr, 38 op/s
2026-04-15T13:47:07.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:07 vm09 bash[34466]: audit 2026-04-15T13:47:06.370155+0000 mgr.vm06.qbbldl (mgr.14229) 776 : audit [DBG] from='client.16274 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:47:07.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:07 vm09 bash[34466]: audit 2026-04-15T13:47:06.783531+0000 mon.vm06 (mon.0) 1229 : audit [DBG] from='client.? 192.168.123.106:0/2186856830' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:47:08.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:08 vm06 bash[28114]: audit 2026-04-15T13:47:06.562004+0000 mgr.vm06.qbbldl (mgr.14229) 777 : audit [DBG] from='client.16278 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:47:08.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:08 vm09 bash[34466]: audit 2026-04-15T13:47:06.562004+0000 mgr.vm06.qbbldl (mgr.14229) 777 : audit [DBG] from='client.16278 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:47:09.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:09 vm06 bash[28114]: cluster 2026-04-15T13:47:08.348557+0000 mgr.vm06.qbbldl (mgr.14229) 778 : cluster [DBG] pgmap v429: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:47:09.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:09 vm06 bash[28114]: audit 2026-04-15T13:47:08.494806+0000 mon.vm06 (mon.0) 1230 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:47:09.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:09 vm06 bash[28114]: audit 2026-04-15T13:47:08.495398+0000 mon.vm06 (mon.0) 1231 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:47:09.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:09 vm09 bash[34466]: cluster 2026-04-15T13:47:08.348557+0000 mgr.vm06.qbbldl (mgr.14229) 778 : cluster [DBG] pgmap v429: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:47:09.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:09 vm09 bash[34466]: audit 2026-04-15T13:47:08.494806+0000 mon.vm06 (mon.0) 1230 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:47:09.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:09 vm09 bash[34466]: audit 2026-04-15T13:47:08.495398+0000 mon.vm06 (mon.0) 1231 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:47:11.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:11 vm06 bash[28114]: audit 2026-04-15T13:47:10.194469+0000 mon.vm06 (mon.0) 1232 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:47:11.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:11 vm06 bash[28114]: audit 2026-04-15T13:47:10.199731+0000 mon.vm06 (mon.0) 1233 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:47:11.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:11 vm06 bash[28114]: cluster 2026-04-15T13:47:10.348991+0000 mgr.vm06.qbbldl (mgr.14229) 779 : cluster [DBG] pgmap v430: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:47:11.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:11 vm06 bash[28114]: audit 2026-04-15T13:47:10.574433+0000 mon.vm06 (mon.0) 1234 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:47:11.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:11 vm06 bash[28114]: audit 2026-04-15T13:47:10.575293+0000 mon.vm06 (mon.0) 1235 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:47:11.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:11 vm06 bash[28114]: audit 2026-04-15T13:47:10.582268+0000 mon.vm06 (mon.0) 1236 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:47:11.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:11 vm06 bash[28114]: audit 2026-04-15T13:47:10.584179+0000 mon.vm06 (mon.0) 1237 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:47:11.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:11 vm09 bash[34466]: audit 2026-04-15T13:47:10.194469+0000 mon.vm06 (mon.0) 1232 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:47:11.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:11 vm09 bash[34466]: audit 2026-04-15T13:47:10.199731+0000 mon.vm06 (mon.0) 1233 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:47:11.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:11 vm09 bash[34466]: cluster 2026-04-15T13:47:10.348991+0000 mgr.vm06.qbbldl (mgr.14229) 779 : cluster [DBG] pgmap v430: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:47:11.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:11 vm09 bash[34466]: audit 2026-04-15T13:47:10.574433+0000 mon.vm06 (mon.0) 1234 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:47:11.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:11 vm09 bash[34466]: audit 2026-04-15T13:47:10.575293+0000 mon.vm06 (mon.0) 1235 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:47:11.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:11 vm09 bash[34466]: audit 2026-04-15T13:47:10.582268+0000 mon.vm06 (mon.0) 1236 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:47:11.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:11 vm09 bash[34466]: audit 2026-04-15T13:47:10.584179+0000 mon.vm06 (mon.0) 1237 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:47:12.022 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:47:12.208 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:47:12.208 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (10m) 5m ago 11m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:47:12.208 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (5m) 5m ago 11m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:47:12.208 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (22s) 2s ago 11m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:47:12.208 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 2s ago 11m - -
2026-04-15T13:47:12.471 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:47:12.471 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:47:12.471 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:47:12.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:12 vm06 bash[28114]: cluster 2026-04-15T13:47:10.576764+0000 mgr.vm06.qbbldl (mgr.14229) 780 : cluster [DBG] pgmap v431: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 100 B/s rd, 200 B/s wr, 0 op/s
2026-04-15T13:47:12.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:12 vm06 bash[28114]: cluster 2026-04-15T13:47:11.200028+0000 mon.vm06 (mon.0) 1238 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)
2026-04-15T13:47:12.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:12 vm09 bash[34466]: cluster 2026-04-15T13:47:10.576764+0000 mgr.vm06.qbbldl (mgr.14229) 780 : cluster [DBG] pgmap v431: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 100 B/s rd, 200 B/s wr, 0 op/s
2026-04-15T13:47:12.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:12 vm09 bash[34466]: cluster 2026-04-15T13:47:11.200028+0000 mon.vm06 (mon.0) 1238 : cluster [WRN] Health check failed: 1 failed cephadm daemon(s) (CEPHADM_FAILED_DAEMON)
2026-04-15T13:47:13.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:13 vm06 bash[28114]: audit 2026-04-15T13:47:12.001832+0000 mgr.vm06.qbbldl (mgr.14229) 781 : audit [DBG] from='client.16286 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:47:13.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:13 vm06 bash[28114]: audit 2026-04-15T13:47:12.200209+0000 mgr.vm06.qbbldl (mgr.14229) 782 : audit [DBG] from='client.16290 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:47:13.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:13 vm06 bash[28114]: audit 2026-04-15T13:47:12.467650+0000 mon.vm06 (mon.0) 1239 : audit [DBG] from='client.? 192.168.123.106:0/3591592222' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:47:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:13 vm09 bash[34466]: audit 2026-04-15T13:47:12.001832+0000 mgr.vm06.qbbldl (mgr.14229) 781 : audit [DBG] from='client.16286 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:47:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:13 vm09 bash[34466]: audit 2026-04-15T13:47:12.200209+0000 mgr.vm06.qbbldl (mgr.14229) 782 : audit [DBG] from='client.16290 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:47:13.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:13 vm09 bash[34466]: audit 2026-04-15T13:47:12.467650+0000 mon.vm06 (mon.0) 1239 : audit [DBG] from='client.? 192.168.123.106:0/3591592222' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:47:14.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:14 vm06 bash[28114]: cluster 2026-04-15T13:47:12.577136+0000 mgr.vm06.qbbldl (mgr.14229) 783 : cluster [DBG] pgmap v432: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 100 B/s rd, 200 B/s wr, 0 op/s
2026-04-15T13:47:14.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:14 vm09 bash[34466]: cluster 2026-04-15T13:47:12.577136+0000 mgr.vm06.qbbldl (mgr.14229) 783 : cluster [DBG] pgmap v432: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 100 B/s rd, 200 B/s wr, 0 op/s
2026-04-15T13:47:16.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:16 vm06 bash[28114]: cluster 2026-04-15T13:47:14.577550+0000 mgr.vm06.qbbldl (mgr.14229) 784 : cluster [DBG] pgmap v433: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 100 B/s rd, 200 B/s wr, 0 op/s
2026-04-15T13:47:16.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:16 vm09 bash[34466]: cluster 2026-04-15T13:47:14.577550+0000 mgr.vm06.qbbldl (mgr.14229) 784 : cluster [DBG] pgmap v433: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 100 B/s rd, 200 B/s wr, 0 op/s
2026-04-15T13:47:17.701 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:47:17.885 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:47:17.886 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (10m) 5m ago 11m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:47:17.886 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (5m) 5m ago 11m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:47:17.886 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (28s) 7s ago 11m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:47:17.886 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 7s ago 11m - -
2026-04-15T13:47:18.153 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:47:18.153 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:47:18.153 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:47:18.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:18 vm06 bash[28114]: cluster 2026-04-15T13:47:16.578045+0000 mgr.vm06.qbbldl (mgr.14229) 785 : cluster [DBG] pgmap v434: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 100 B/s rd, 200 B/s wr, 0 op/s
2026-04-15T13:47:18.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:18 vm06 bash[28114]: audit 2026-04-15T13:47:18.149536+0000 mon.vm06 (mon.0) 1240 : audit [DBG] from='client.? 192.168.123.106:0/4254324575' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:47:18.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:18 vm09 bash[34466]: cluster 2026-04-15T13:47:16.578045+0000 mgr.vm06.qbbldl (mgr.14229) 785 : cluster [DBG] pgmap v434: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 100 B/s rd, 200 B/s wr, 0 op/s
2026-04-15T13:47:18.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:18 vm09 bash[34466]: audit 2026-04-15T13:47:18.149536+0000 mon.vm06 (mon.0) 1240 : audit [DBG] from='client.? 192.168.123.106:0/4254324575' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:47:19.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:19 vm06 bash[28114]: audit 2026-04-15T13:47:17.676602+0000 mgr.vm06.qbbldl (mgr.14229) 786 : audit [DBG] from='client.16298 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:47:19.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:19 vm06 bash[28114]: audit 2026-04-15T13:47:17.879090+0000 mgr.vm06.qbbldl (mgr.14229) 787 : audit [DBG] from='client.16302 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:47:19.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:19 vm09 bash[34466]: audit 2026-04-15T13:47:17.676602+0000 mgr.vm06.qbbldl (mgr.14229) 786 : audit [DBG] from='client.16298 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:47:19.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:19 vm09 bash[34466]: audit 2026-04-15T13:47:17.879090+0000 mgr.vm06.qbbldl (mgr.14229) 787 : audit [DBG] from='client.16302 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:47:20.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:20 vm06 bash[28114]: cluster 2026-04-15T13:47:18.578469+0000 mgr.vm06.qbbldl (mgr.14229) 788 : cluster [DBG] pgmap v435: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 100 B/s rd, 200 B/s wr, 0 op/s
2026-04-15T13:47:20.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:20 vm09 bash[34466]: cluster 2026-04-15T13:47:18.578469+0000 mgr.vm06.qbbldl (mgr.14229) 788 : cluster [DBG] pgmap v435: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 100 B/s rd, 200 B/s wr, 0 op/s
2026-04-15T13:47:22.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:22 vm06 bash[28114]: cluster 2026-04-15T13:47:20.578934+0000 mgr.vm06.qbbldl (mgr.14229) 789 : cluster [DBG] pgmap v436: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:47:22.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:22 vm09 bash[34466]: cluster 2026-04-15T13:47:20.578934+0000 mgr.vm06.qbbldl (mgr.14229) 789 : cluster [DBG] pgmap v436: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:47:23.370 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:47:23.555 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:47:23.555 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (11m) 5m ago 11m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:47:23.555 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (5m) 5m ago 11m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:47:23.555 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (33s) 13s ago 11m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:47:23.555 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 13s ago 11m - -
2026-04-15T13:47:23.796 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:47:23.796 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:47:23.796 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:47:24.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:24 vm09 bash[34466]: cluster 2026-04-15T13:47:22.579306+0000 mgr.vm06.qbbldl (mgr.14229) 790 : cluster [DBG] pgmap v437: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:47:24.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:24 vm09 bash[34466]: audit 2026-04-15T13:47:23.349422+0000 mgr.vm06.qbbldl (mgr.14229) 791 : audit [DBG] from='client.16310 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:47:24.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:24 vm09 bash[34466]: audit 2026-04-15T13:47:23.497647+0000 mon.vm06 (mon.0) 1241 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:47:24.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:24 vm09 bash[34466]: audit 2026-04-15T13:47:23.498382+0000 mon.vm06 (mon.0) 1242 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:47:24.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:24 vm09 bash[34466]: audit 2026-04-15T13:47:23.792136+0000 mon.vm06 (mon.0) 1243 : audit [DBG] from='client.? 192.168.123.106:0/200195764' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:47:24.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:24 vm06 bash[28114]: cluster 2026-04-15T13:47:22.579306+0000 mgr.vm06.qbbldl (mgr.14229) 790 : cluster [DBG] pgmap v437: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:47:24.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:24 vm06 bash[28114]: audit 2026-04-15T13:47:23.349422+0000 mgr.vm06.qbbldl (mgr.14229) 791 : audit [DBG] from='client.16310 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:47:24.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:24 vm06 bash[28114]: audit 2026-04-15T13:47:23.497647+0000 mon.vm06 (mon.0) 1241 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:47:24.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:24 vm06 bash[28114]: audit 2026-04-15T13:47:23.498382+0000 mon.vm06 (mon.0) 1242 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:47:24.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:24 vm06 bash[28114]: audit 2026-04-15T13:47:23.792136+0000 mon.vm06 (mon.0) 1243 : audit [DBG] from='client.? 192.168.123.106:0/200195764' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:47:25.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:25 vm09 bash[34466]: audit 2026-04-15T13:47:23.548076+0000 mgr.vm06.qbbldl (mgr.14229) 792 : audit [DBG] from='client.16314 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:47:25.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:25 vm06 bash[28114]: audit 2026-04-15T13:47:23.548076+0000 mgr.vm06.qbbldl (mgr.14229) 792 : audit [DBG] from='client.16314 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:47:26.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:26 vm09 bash[34466]: cluster 2026-04-15T13:47:24.579835+0000 mgr.vm06.qbbldl (mgr.14229) 793 : cluster [DBG] pgmap v438: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:47:26.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:26 vm06 bash[28114]: cluster 2026-04-15T13:47:24.579835+0000 mgr.vm06.qbbldl (mgr.14229) 793 : cluster [DBG] pgmap v438: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:47:28.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:28 vm09 bash[34466]: cluster 2026-04-15T13:47:26.580362+0000 mgr.vm06.qbbldl (mgr.14229) 794 : cluster [DBG] pgmap v439: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:47:28.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:28 vm06 bash[28114]: cluster 2026-04-15T13:47:26.580362+0000 mgr.vm06.qbbldl (mgr.14229) 794 : cluster [DBG] pgmap v439: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:47:28.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:28 vm06 bash[28114]: cluster 2026-04-15T13:47:26.580362+0000 mgr.vm06.qbbldl (mgr.14229) 794 : cluster [DBG] pgmap v439: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:29.021 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop 2026-04-15T13:47:29.209 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:47:29.210 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (11m) 5m ago 11m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:47:29.210 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (5m) 5m ago 12m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:47:29.210 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (39s) 19s ago 12m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111 2026-04-15T13:47:29.210 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 19s ago 12m - - 2026-04-15T13:47:29.440 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:47:29.440 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:47:29.440 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state 2026-04-15T13:47:30.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:30 vm09 bash[34466]: cluster 2026-04-15T13:47:28.580959+0000 mgr.vm06.qbbldl (mgr.14229) 795 : cluster [DBG] pgmap v440: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:30.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:30 vm09 bash[34466]: cluster 2026-04-15T13:47:28.580959+0000 mgr.vm06.qbbldl (mgr.14229) 795 : cluster [DBG] pgmap v440: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:30.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:30 vm09 bash[34466]: audit 2026-04-15T13:47:28.997333+0000 mgr.vm06.qbbldl (mgr.14229) 796 : audit [DBG] from='client.16322 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:30.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:30 vm09 bash[34466]: audit 2026-04-15T13:47:28.997333+0000 mgr.vm06.qbbldl (mgr.14229) 796 : audit [DBG] from='client.16322 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:30.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:30 vm09 bash[34466]: audit 2026-04-15T13:47:29.203239+0000 mgr.vm06.qbbldl (mgr.14229) 797 : audit [DBG] from='client.16326 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:30.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:30 vm09 bash[34466]: audit 2026-04-15T13:47:29.203239+0000 mgr.vm06.qbbldl (mgr.14229) 797 : audit [DBG] from='client.16326 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:30.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:30 vm09 bash[34466]: audit 2026-04-15T13:47:29.436634+0000 
mon.vm06 (mon.0) 1244 : audit [DBG] from='client.? 192.168.123.106:0/4289002222' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:30.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:30 vm09 bash[34466]: audit 2026-04-15T13:47:29.436634+0000 mon.vm06 (mon.0) 1244 : audit [DBG] from='client.? 192.168.123.106:0/4289002222' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:30.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:30 vm06 bash[28114]: cluster 2026-04-15T13:47:28.580959+0000 mgr.vm06.qbbldl (mgr.14229) 795 : cluster [DBG] pgmap v440: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:30.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:30 vm06 bash[28114]: cluster 2026-04-15T13:47:28.580959+0000 mgr.vm06.qbbldl (mgr.14229) 795 : cluster [DBG] pgmap v440: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:30.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:30 vm06 bash[28114]: audit 2026-04-15T13:47:28.997333+0000 mgr.vm06.qbbldl (mgr.14229) 796 : audit [DBG] from='client.16322 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:30.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:30 vm06 bash[28114]: audit 2026-04-15T13:47:28.997333+0000 mgr.vm06.qbbldl (mgr.14229) 796 : audit [DBG] from='client.16322 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:30.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:30 vm06 bash[28114]: audit 2026-04-15T13:47:29.203239+0000 mgr.vm06.qbbldl (mgr.14229) 797 : audit [DBG] from='client.16326 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:30.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:30 vm06 bash[28114]: audit 2026-04-15T13:47:29.203239+0000 mgr.vm06.qbbldl (mgr.14229) 797 : audit [DBG] from='client.16326 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:30.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:30 vm06 bash[28114]: audit 2026-04-15T13:47:29.436634+0000 mon.vm06 (mon.0) 1244 : audit [DBG] from='client.? 192.168.123.106:0/4289002222' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:30.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:30 vm06 bash[28114]: audit 2026-04-15T13:47:29.436634+0000 mon.vm06 (mon.0) 1244 : audit [DBG] from='client.? 
192.168.123.106:0/4289002222' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:32 vm09 bash[34466]: cluster 2026-04-15T13:47:30.581378+0000 mgr.vm06.qbbldl (mgr.14229) 798 : cluster [DBG] pgmap v441: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:32.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:32 vm09 bash[34466]: cluster 2026-04-15T13:47:30.581378+0000 mgr.vm06.qbbldl (mgr.14229) 798 : cluster [DBG] pgmap v441: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:32.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:32 vm06 bash[28114]: cluster 2026-04-15T13:47:30.581378+0000 mgr.vm06.qbbldl (mgr.14229) 798 : cluster [DBG] pgmap v441: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:32.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:32 vm06 bash[28114]: cluster 2026-04-15T13:47:30.581378+0000 mgr.vm06.qbbldl (mgr.14229) 798 : cluster [DBG] pgmap v441: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:34.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:34 vm09 bash[34466]: cluster 2026-04-15T13:47:32.581734+0000 mgr.vm06.qbbldl (mgr.14229) 799 : cluster [DBG] pgmap v442: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:34.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:34 vm09 bash[34466]: cluster 2026-04-15T13:47:32.581734+0000 mgr.vm06.qbbldl (mgr.14229) 799 : cluster [DBG] pgmap v442: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:34.652 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop 2026-04-15T13:47:34.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:34 vm06 bash[28114]: cluster 2026-04-15T13:47:32.581734+0000 mgr.vm06.qbbldl (mgr.14229) 799 : cluster [DBG] pgmap v442: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:34.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:34 vm06 bash[28114]: cluster 2026-04-15T13:47:32.581734+0000 mgr.vm06.qbbldl (mgr.14229) 799 : cluster [DBG] pgmap v442: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:34.845 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:47:34.845 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (11m) 5m ago 12m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:47:34.845 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (5m) 5m ago 12m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:47:34.845 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (45s) 24s ago 12m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111 2026-04-15T13:47:34.845 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 24s ago 12m - - 2026-04-15T13:47:35.082 
INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:47:35.082 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:47:35.082 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state 2026-04-15T13:47:35.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:35 vm09 bash[34466]: audit 2026-04-15T13:47:35.078229+0000 mon.vm06 (mon.0) 1245 : audit [DBG] from='client.? 192.168.123.106:0/975882657' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:35.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:35 vm09 bash[34466]: audit 2026-04-15T13:47:35.078229+0000 mon.vm06 (mon.0) 1245 : audit [DBG] from='client.? 192.168.123.106:0/975882657' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:35.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:35 vm06 bash[28114]: audit 2026-04-15T13:47:35.078229+0000 mon.vm06 (mon.0) 1245 : audit [DBG] from='client.? 192.168.123.106:0/975882657' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:35.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:35 vm06 bash[28114]: audit 2026-04-15T13:47:35.078229+0000 mon.vm06 (mon.0) 1245 : audit [DBG] from='client.? 192.168.123.106:0/975882657' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:36.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:36 vm09 bash[34466]: cluster 2026-04-15T13:47:34.582150+0000 mgr.vm06.qbbldl (mgr.14229) 800 : cluster [DBG] pgmap v443: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:36.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:36 vm09 bash[34466]: cluster 2026-04-15T13:47:34.582150+0000 mgr.vm06.qbbldl (mgr.14229) 800 : cluster [DBG] pgmap v443: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:36.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:36 vm09 bash[34466]: audit 2026-04-15T13:47:34.631610+0000 mgr.vm06.qbbldl (mgr.14229) 801 : audit [DBG] from='client.16334 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:36.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:36 vm09 bash[34466]: audit 2026-04-15T13:47:34.631610+0000 mgr.vm06.qbbldl (mgr.14229) 801 : audit [DBG] from='client.16334 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:36.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:36 vm09 bash[34466]: audit 2026-04-15T13:47:34.838613+0000 mgr.vm06.qbbldl (mgr.14229) 802 : audit [DBG] from='client.16338 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:36.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:36 vm09 bash[34466]: audit 2026-04-15T13:47:34.838613+0000 mgr.vm06.qbbldl (mgr.14229) 802 : audit [DBG] from='client.16338 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:36.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:36 vm06 bash[28114]: cluster 2026-04-15T13:47:34.582150+0000 mgr.vm06.qbbldl (mgr.14229) 800 : cluster [DBG] pgmap v443: 129 pgs: 
129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:36.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:36 vm06 bash[28114]: cluster 2026-04-15T13:47:34.582150+0000 mgr.vm06.qbbldl (mgr.14229) 800 : cluster [DBG] pgmap v443: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:36.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:36 vm06 bash[28114]: audit 2026-04-15T13:47:34.631610+0000 mgr.vm06.qbbldl (mgr.14229) 801 : audit [DBG] from='client.16334 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:36.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:36 vm06 bash[28114]: audit 2026-04-15T13:47:34.631610+0000 mgr.vm06.qbbldl (mgr.14229) 801 : audit [DBG] from='client.16334 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:36.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:36 vm06 bash[28114]: audit 2026-04-15T13:47:34.838613+0000 mgr.vm06.qbbldl (mgr.14229) 802 : audit [DBG] from='client.16338 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:36.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:36 vm06 bash[28114]: audit 2026-04-15T13:47:34.838613+0000 mgr.vm06.qbbldl (mgr.14229) 802 : audit [DBG] from='client.16338 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:38.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:38 vm06 bash[28114]: cluster 2026-04-15T13:47:36.582633+0000 mgr.vm06.qbbldl (mgr.14229) 803 : cluster [DBG] pgmap v444: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:47:38.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:38 vm06 bash[28114]: cluster 2026-04-15T13:47:36.582633+0000 mgr.vm06.qbbldl (mgr.14229) 803 : cluster [DBG] pgmap v444: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:47:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:38 vm09 bash[34466]: cluster 2026-04-15T13:47:36.582633+0000 mgr.vm06.qbbldl (mgr.14229) 803 : cluster [DBG] pgmap v444: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:47:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:38 vm09 bash[34466]: cluster 2026-04-15T13:47:36.582633+0000 mgr.vm06.qbbldl (mgr.14229) 803 : cluster [DBG] pgmap v444: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:47:39.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:39 vm06 bash[28114]: audit 2026-04-15T13:47:38.492815+0000 mon.vm06 (mon.0) 1246 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:47:39.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:39 vm06 bash[28114]: audit 2026-04-15T13:47:38.492815+0000 mon.vm06 (mon.0) 1246 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:47:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:39 vm09 bash[34466]: audit 2026-04-15T13:47:38.492815+0000 mon.vm06 (mon.0) 1246 : audit [DBG] from='mgr.14229 
192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:47:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:39 vm09 bash[34466]: audit 2026-04-15T13:47:38.492815+0000 mon.vm06 (mon.0) 1246 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:47:40.297 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop 2026-04-15T13:47:40.489 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:47:40.489 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (11m) 5m ago 12m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:47:40.489 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (6m) 5m ago 12m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:47:40.489 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (50s) 30s ago 12m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111 2026-04-15T13:47:40.489 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 30s ago 12m - - 2026-04-15T13:47:40.735 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:47:40.736 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:47:40.736 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state 2026-04-15T13:47:40.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:40 vm06 bash[28114]: cluster 2026-04-15T13:47:38.583140+0000 mgr.vm06.qbbldl (mgr.14229) 804 : cluster [DBG] pgmap v445: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:47:40.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:40 vm06 bash[28114]: cluster 2026-04-15T13:47:38.583140+0000 mgr.vm06.qbbldl (mgr.14229) 804 : cluster [DBG] pgmap v445: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:47:40.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:40 vm09 bash[34466]: cluster 2026-04-15T13:47:38.583140+0000 mgr.vm06.qbbldl (mgr.14229) 804 : cluster [DBG] pgmap v445: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:47:40.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:40 vm09 bash[34466]: cluster 2026-04-15T13:47:38.583140+0000 mgr.vm06.qbbldl (mgr.14229) 804 : cluster [DBG] pgmap v445: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:47:41.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:41 vm06 bash[28114]: audit 2026-04-15T13:47:40.274631+0000 mgr.vm06.qbbldl (mgr.14229) 805 : audit [DBG] from='client.16346 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:41.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:41 vm06 bash[28114]: audit 2026-04-15T13:47:40.274631+0000 mgr.vm06.qbbldl (mgr.14229) 805 : audit [DBG] from='client.16346 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:41.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:41 vm06 bash[28114]: audit 2026-04-15T13:47:40.731698+0000 mon.vm06 (mon.0) 1247 : 
audit [DBG] from='client.? 192.168.123.106:0/309455338' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:41.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:41 vm06 bash[28114]: audit 2026-04-15T13:47:40.731698+0000 mon.vm06 (mon.0) 1247 : audit [DBG] from='client.? 192.168.123.106:0/309455338' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:41.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:41 vm09 bash[34466]: audit 2026-04-15T13:47:40.274631+0000 mgr.vm06.qbbldl (mgr.14229) 805 : audit [DBG] from='client.16346 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:41.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:41 vm09 bash[34466]: audit 2026-04-15T13:47:40.274631+0000 mgr.vm06.qbbldl (mgr.14229) 805 : audit [DBG] from='client.16346 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:41.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:41 vm09 bash[34466]: audit 2026-04-15T13:47:40.731698+0000 mon.vm06 (mon.0) 1247 : audit [DBG] from='client.? 192.168.123.106:0/309455338' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:41.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:41 vm09 bash[34466]: audit 2026-04-15T13:47:40.731698+0000 mon.vm06 (mon.0) 1247 : audit [DBG] from='client.? 192.168.123.106:0/309455338' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:42.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:42 vm06 bash[28114]: audit 2026-04-15T13:47:40.481825+0000 mgr.vm06.qbbldl (mgr.14229) 806 : audit [DBG] from='client.16350 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:42.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:42 vm06 bash[28114]: audit 2026-04-15T13:47:40.481825+0000 mgr.vm06.qbbldl (mgr.14229) 806 : audit [DBG] from='client.16350 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:42.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:42 vm06 bash[28114]: cluster 2026-04-15T13:47:40.583555+0000 mgr.vm06.qbbldl (mgr.14229) 807 : cluster [DBG] pgmap v446: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:42.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:42 vm06 bash[28114]: cluster 2026-04-15T13:47:40.583555+0000 mgr.vm06.qbbldl (mgr.14229) 807 : cluster [DBG] pgmap v446: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:42.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:42 vm09 bash[34466]: audit 2026-04-15T13:47:40.481825+0000 mgr.vm06.qbbldl (mgr.14229) 806 : audit [DBG] from='client.16350 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:42.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:42 vm09 bash[34466]: audit 2026-04-15T13:47:40.481825+0000 mgr.vm06.qbbldl (mgr.14229) 806 : audit [DBG] from='client.16350 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:42.859 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:42 vm09 bash[34466]: cluster 2026-04-15T13:47:40.583555+0000 mgr.vm06.qbbldl (mgr.14229) 807 : cluster [DBG] pgmap v446: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:42.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:42 vm09 bash[34466]: cluster 2026-04-15T13:47:40.583555+0000 mgr.vm06.qbbldl (mgr.14229) 807 : cluster [DBG] pgmap v446: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:44.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:44 vm06 bash[28114]: cluster 2026-04-15T13:47:42.583991+0000 mgr.vm06.qbbldl (mgr.14229) 808 : cluster [DBG] pgmap v447: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:44.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:44 vm06 bash[28114]: cluster 2026-04-15T13:47:42.583991+0000 mgr.vm06.qbbldl (mgr.14229) 808 : cluster [DBG] pgmap v447: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:44.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:44 vm09 bash[34466]: cluster 2026-04-15T13:47:42.583991+0000 mgr.vm06.qbbldl (mgr.14229) 808 : cluster [DBG] pgmap v447: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:44.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:44 vm09 bash[34466]: cluster 2026-04-15T13:47:42.583991+0000 mgr.vm06.qbbldl (mgr.14229) 808 : cluster [DBG] pgmap v447: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:45.960 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop 2026-04-15T13:47:46.160 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:47:46.160 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (11m) 6m ago 12m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:47:46.160 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (6m) 6m ago 12m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:47:46.160 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (56s) 35s ago 12m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111 2026-04-15T13:47:46.160 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 35s ago 12m - - 2026-04-15T13:47:46.391 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:47:46.392 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:47:46.392 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state 2026-04-15T13:47:46.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:46 vm06 bash[28114]: cluster 2026-04-15T13:47:44.584418+0000 mgr.vm06.qbbldl (mgr.14229) 809 : cluster [DBG] pgmap v448: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:46.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:46 vm06 bash[28114]: cluster 2026-04-15T13:47:44.584418+0000 
mgr.vm06.qbbldl (mgr.14229) 809 : cluster [DBG] pgmap v448: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:46.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:46 vm06 bash[28114]: audit 2026-04-15T13:47:46.387776+0000 mon.vm06 (mon.0) 1248 : audit [DBG] from='client.? 192.168.123.106:0/2027917684' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:46.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:46 vm06 bash[28114]: audit 2026-04-15T13:47:46.387776+0000 mon.vm06 (mon.0) 1248 : audit [DBG] from='client.? 192.168.123.106:0/2027917684' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:46.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:46 vm09 bash[34466]: cluster 2026-04-15T13:47:44.584418+0000 mgr.vm06.qbbldl (mgr.14229) 809 : cluster [DBG] pgmap v448: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:46.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:46 vm09 bash[34466]: cluster 2026-04-15T13:47:44.584418+0000 mgr.vm06.qbbldl (mgr.14229) 809 : cluster [DBG] pgmap v448: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:46.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:46 vm09 bash[34466]: audit 2026-04-15T13:47:46.387776+0000 mon.vm06 (mon.0) 1248 : audit [DBG] from='client.? 192.168.123.106:0/2027917684' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:46.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:46 vm09 bash[34466]: audit 2026-04-15T13:47:46.387776+0000 mon.vm06 (mon.0) 1248 : audit [DBG] from='client.? 
192.168.123.106:0/2027917684' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:47.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:47 vm06 bash[28114]: audit 2026-04-15T13:47:45.935930+0000 mgr.vm06.qbbldl (mgr.14229) 810 : audit [DBG] from='client.16358 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:47.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:47 vm06 bash[28114]: audit 2026-04-15T13:47:45.935930+0000 mgr.vm06.qbbldl (mgr.14229) 810 : audit [DBG] from='client.16358 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:47.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:47 vm06 bash[28114]: audit 2026-04-15T13:47:46.152823+0000 mgr.vm06.qbbldl (mgr.14229) 811 : audit [DBG] from='client.16362 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:47.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:47 vm06 bash[28114]: audit 2026-04-15T13:47:46.152823+0000 mgr.vm06.qbbldl (mgr.14229) 811 : audit [DBG] from='client.16362 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:47.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:47 vm09 bash[34466]: audit 2026-04-15T13:47:45.935930+0000 mgr.vm06.qbbldl (mgr.14229) 810 : audit [DBG] from='client.16358 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:47.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:47 vm09 bash[34466]: audit 2026-04-15T13:47:45.935930+0000 mgr.vm06.qbbldl (mgr.14229) 810 : audit [DBG] from='client.16358 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:47.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:47 vm09 bash[34466]: audit 2026-04-15T13:47:46.152823+0000 mgr.vm06.qbbldl (mgr.14229) 811 : audit [DBG] from='client.16362 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:47.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:47 vm09 bash[34466]: audit 2026-04-15T13:47:46.152823+0000 mgr.vm06.qbbldl (mgr.14229) 811 : audit [DBG] from='client.16362 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:48.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:48 vm06 bash[28114]: cluster 2026-04-15T13:47:46.585039+0000 mgr.vm06.qbbldl (mgr.14229) 812 : cluster [DBG] pgmap v449: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:48.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:48 vm06 bash[28114]: cluster 2026-04-15T13:47:46.585039+0000 mgr.vm06.qbbldl (mgr.14229) 812 : cluster [DBG] pgmap v449: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:48.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:48 vm09 bash[34466]: cluster 2026-04-15T13:47:46.585039+0000 mgr.vm06.qbbldl (mgr.14229) 812 : cluster [DBG] pgmap v449: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:48.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:48 
vm09 bash[34466]: cluster 2026-04-15T13:47:46.585039+0000 mgr.vm06.qbbldl (mgr.14229) 812 : cluster [DBG] pgmap v449: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:49.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:49 vm06 bash[28114]: cluster 2026-04-15T13:47:48.585528+0000 mgr.vm06.qbbldl (mgr.14229) 813 : cluster [DBG] pgmap v450: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:49.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:49 vm06 bash[28114]: cluster 2026-04-15T13:47:48.585528+0000 mgr.vm06.qbbldl (mgr.14229) 813 : cluster [DBG] pgmap v450: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:49.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:49 vm09 bash[34466]: cluster 2026-04-15T13:47:48.585528+0000 mgr.vm06.qbbldl (mgr.14229) 813 : cluster [DBG] pgmap v450: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:49.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:49 vm09 bash[34466]: cluster 2026-04-15T13:47:48.585528+0000 mgr.vm06.qbbldl (mgr.14229) 813 : cluster [DBG] pgmap v450: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:51.615 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop 2026-04-15T13:47:51.810 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:47:51.810 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (11m) 6m ago 12m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:47:51.810 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (6m) 6m ago 12m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:47:51.810 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (62s) 41s ago 12m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111 2026-04-15T13:47:51.810 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 41s ago 12m - - 2026-04-15T13:47:52.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:51 vm06 bash[28114]: cluster 2026-04-15T13:47:50.585948+0000 mgr.vm06.qbbldl (mgr.14229) 814 : cluster [DBG] pgmap v451: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:52.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:51 vm06 bash[28114]: cluster 2026-04-15T13:47:50.585948+0000 mgr.vm06.qbbldl (mgr.14229) 814 : cluster [DBG] pgmap v451: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:52.063 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:47:52.063 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:47:52.063 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state 2026-04-15T13:47:52.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:51 vm09 bash[34466]: cluster 2026-04-15T13:47:50.585948+0000 mgr.vm06.qbbldl (mgr.14229) 814 : cluster [DBG] pgmap v451: 129 
pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:52.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:51 vm09 bash[34466]: cluster 2026-04-15T13:47:50.585948+0000 mgr.vm06.qbbldl (mgr.14229) 814 : cluster [DBG] pgmap v451: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:47:53.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:52 vm06 bash[28114]: audit 2026-04-15T13:47:51.591959+0000 mgr.vm06.qbbldl (mgr.14229) 815 : audit [DBG] from='client.16370 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:53.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:52 vm06 bash[28114]: audit 2026-04-15T13:47:51.591959+0000 mgr.vm06.qbbldl (mgr.14229) 815 : audit [DBG] from='client.16370 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:53.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:52 vm06 bash[28114]: audit 2026-04-15T13:47:51.803695+0000 mgr.vm06.qbbldl (mgr.14229) 816 : audit [DBG] from='client.16374 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:53.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:52 vm06 bash[28114]: audit 2026-04-15T13:47:51.803695+0000 mgr.vm06.qbbldl (mgr.14229) 816 : audit [DBG] from='client.16374 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:53.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:52 vm06 bash[28114]: audit 2026-04-15T13:47:52.059506+0000 mon.vm06 (mon.0) 1249 : audit [DBG] from='client.? 192.168.123.106:0/3536717882' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:53.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:52 vm06 bash[28114]: audit 2026-04-15T13:47:52.059506+0000 mon.vm06 (mon.0) 1249 : audit [DBG] from='client.? 
192.168.123.106:0/3536717882' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:53.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:52 vm09 bash[34466]: audit 2026-04-15T13:47:51.591959+0000 mgr.vm06.qbbldl (mgr.14229) 815 : audit [DBG] from='client.16370 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:53.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:52 vm09 bash[34466]: audit 2026-04-15T13:47:51.591959+0000 mgr.vm06.qbbldl (mgr.14229) 815 : audit [DBG] from='client.16370 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:53.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:52 vm09 bash[34466]: audit 2026-04-15T13:47:51.803695+0000 mgr.vm06.qbbldl (mgr.14229) 816 : audit [DBG] from='client.16374 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:53.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:52 vm09 bash[34466]: audit 2026-04-15T13:47:51.803695+0000 mgr.vm06.qbbldl (mgr.14229) 816 : audit [DBG] from='client.16374 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:53.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:52 vm09 bash[34466]: audit 2026-04-15T13:47:52.059506+0000 mon.vm06 (mon.0) 1249 : audit [DBG] from='client.? 192.168.123.106:0/3536717882' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:53.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:52 vm09 bash[34466]: audit 2026-04-15T13:47:52.059506+0000 mon.vm06 (mon.0) 1249 : audit [DBG] from='client.? 
192.168.123.106:0/3536717882' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:54.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:53 vm06 bash[28114]: cluster 2026-04-15T13:47:52.586366+0000 mgr.vm06.qbbldl (mgr.14229) 817 : cluster [DBG] pgmap v452: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:47:54.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:53 vm06 bash[28114]: cluster 2026-04-15T13:47:52.586366+0000 mgr.vm06.qbbldl (mgr.14229) 817 : cluster [DBG] pgmap v452: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:47:54.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:53 vm06 bash[28114]: audit 2026-04-15T13:47:53.493001+0000 mon.vm06 (mon.0) 1250 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:47:54.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:53 vm06 bash[28114]: audit 2026-04-15T13:47:53.493001+0000 mon.vm06 (mon.0) 1250 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:47:54.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:53 vm09 bash[34466]: cluster 2026-04-15T13:47:52.586366+0000 mgr.vm06.qbbldl (mgr.14229) 817 : cluster [DBG] pgmap v452: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:47:54.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:53 vm09 bash[34466]: cluster 2026-04-15T13:47:52.586366+0000 mgr.vm06.qbbldl (mgr.14229) 817 : cluster [DBG] pgmap v452: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:47:54.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:53 vm09 bash[34466]: audit 2026-04-15T13:47:53.493001+0000 mon.vm06 (mon.0) 1250 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:47:54.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:53 vm09 bash[34466]: audit 2026-04-15T13:47:53.493001+0000 mon.vm06 (mon.0) 1250 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:47:56.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:55 vm06 bash[28114]: cluster 2026-04-15T13:47:54.586777+0000 mgr.vm06.qbbldl (mgr.14229) 818 : cluster [DBG] pgmap v453: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:56.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:55 vm06 bash[28114]: cluster 2026-04-15T13:47:54.586777+0000 mgr.vm06.qbbldl (mgr.14229) 818 : cluster [DBG] pgmap v453: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:56.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:55 vm09 bash[34466]: cluster 2026-04-15T13:47:54.586777+0000 mgr.vm06.qbbldl (mgr.14229) 818 : cluster [DBG] pgmap v453: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:56.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:55 vm09 bash[34466]: cluster 2026-04-15T13:47:54.586777+0000 mgr.vm06.qbbldl 
(mgr.14229) 818 : cluster [DBG] pgmap v453: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:57.284 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop 2026-04-15T13:47:57.493 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:47:57.493 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (11m) 6m ago 12m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:47:57.493 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (6m) 6m ago 12m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:47:57.493 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (67s) 47s ago 12m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111 2026-04-15T13:47:57.493 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 47s ago 12m - - 2026-04-15T13:47:57.727 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:47:57.727 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:47:57.727 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state 2026-04-15T13:47:58.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:57 vm06 bash[28114]: cluster 2026-04-15T13:47:56.587176+0000 mgr.vm06.qbbldl (mgr.14229) 819 : cluster [DBG] pgmap v454: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:58.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:57 vm06 bash[28114]: cluster 2026-04-15T13:47:56.587176+0000 mgr.vm06.qbbldl (mgr.14229) 819 : cluster [DBG] pgmap v454: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:58.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:57 vm06 bash[28114]: audit 2026-04-15T13:47:57.264397+0000 mgr.vm06.qbbldl (mgr.14229) 820 : audit [DBG] from='client.16382 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:58.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:57 vm06 bash[28114]: audit 2026-04-15T13:47:57.264397+0000 mgr.vm06.qbbldl (mgr.14229) 820 : audit [DBG] from='client.16382 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:58.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:57 vm09 bash[34466]: cluster 2026-04-15T13:47:56.587176+0000 mgr.vm06.qbbldl (mgr.14229) 819 : cluster [DBG] pgmap v454: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:58.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:57 vm09 bash[34466]: cluster 2026-04-15T13:47:56.587176+0000 mgr.vm06.qbbldl (mgr.14229) 819 : cluster [DBG] pgmap v454: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:47:58.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:57 vm09 bash[34466]: audit 2026-04-15T13:47:57.264397+0000 mgr.vm06.qbbldl (mgr.14229) 820 : audit [DBG] from='client.16382 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 
2026-04-15T13:47:58.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:57 vm09 bash[34466]: audit 2026-04-15T13:47:57.264397+0000 mgr.vm06.qbbldl (mgr.14229) 820 : audit [DBG] from='client.16382 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:59.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:58 vm06 bash[28114]: audit 2026-04-15T13:47:57.486505+0000 mgr.vm06.qbbldl (mgr.14229) 821 : audit [DBG] from='client.16386 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:59.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:58 vm06 bash[28114]: audit 2026-04-15T13:47:57.486505+0000 mgr.vm06.qbbldl (mgr.14229) 821 : audit [DBG] from='client.16386 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:59.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:58 vm06 bash[28114]: audit 2026-04-15T13:47:57.723358+0000 mon.vm06 (mon.0) 1251 : audit [DBG] from='client.? 192.168.123.106:0/3344641548' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:59.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:58 vm06 bash[28114]: audit 2026-04-15T13:47:57.723358+0000 mon.vm06 (mon.0) 1251 : audit [DBG] from='client.? 192.168.123.106:0/3344641548' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:59.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:58 vm09 bash[34466]: audit 2026-04-15T13:47:57.486505+0000 mgr.vm06.qbbldl (mgr.14229) 821 : audit [DBG] from='client.16386 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:59.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:58 vm09 bash[34466]: audit 2026-04-15T13:47:57.486505+0000 mgr.vm06.qbbldl (mgr.14229) 821 : audit [DBG] from='client.16386 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:47:59.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:58 vm09 bash[34466]: audit 2026-04-15T13:47:57.723358+0000 mon.vm06 (mon.0) 1251 : audit [DBG] from='client.? 192.168.123.106:0/3344641548' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:47:59.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:58 vm09 bash[34466]: audit 2026-04-15T13:47:57.723358+0000 mon.vm06 (mon.0) 1251 : audit [DBG] from='client.? 
192.168.123.106:0/3344641548' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:48:00.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:59 vm06 bash[28114]: cluster 2026-04-15T13:47:58.587607+0000 mgr.vm06.qbbldl (mgr.14229) 822 : cluster [DBG] pgmap v455: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:48:00.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:47:59 vm06 bash[28114]: cluster 2026-04-15T13:47:58.587607+0000 mgr.vm06.qbbldl (mgr.14229) 822 : cluster [DBG] pgmap v455: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:48:00.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:59 vm09 bash[34466]: cluster 2026-04-15T13:47:58.587607+0000 mgr.vm06.qbbldl (mgr.14229) 822 : cluster [DBG] pgmap v455: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:48:00.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:47:59 vm09 bash[34466]: cluster 2026-04-15T13:47:58.587607+0000 mgr.vm06.qbbldl (mgr.14229) 822 : cluster [DBG] pgmap v455: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:48:02.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:01 vm06 bash[28114]: cluster 2026-04-15T13:48:00.588021+0000 mgr.vm06.qbbldl (mgr.14229) 823 : cluster [DBG] pgmap v456: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:48:02.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:01 vm06 bash[28114]: cluster 2026-04-15T13:48:00.588021+0000 mgr.vm06.qbbldl (mgr.14229) 823 : cluster [DBG] pgmap v456: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:48:02.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:01 vm09 bash[34466]: cluster 2026-04-15T13:48:00.588021+0000 mgr.vm06.qbbldl (mgr.14229) 823 : cluster [DBG] pgmap v456: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:48:02.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:01 vm09 bash[34466]: cluster 2026-04-15T13:48:00.588021+0000 mgr.vm06.qbbldl (mgr.14229) 823 : cluster [DBG] pgmap v456: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:48:02.940 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop 2026-04-15T13:48:03.143 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:48:03.143 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (11m) 6m ago 12m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:48:03.143 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (6m) 6m ago 12m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:48:03.143 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (73s) 52s ago 12m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111 2026-04-15T13:48:03.143 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 52s ago 12m - - 2026-04-15T13:48:03.395 
2026-04-15T13:48:03.395 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:48:03.395 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:48:03.395 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:48:04.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:03 vm06 bash[28114]: cluster 2026-04-15T13:48:02.588372+0000 mgr.vm06.qbbldl (mgr.14229) 824 : cluster [DBG] pgmap v457: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:48:04.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:03 vm06 bash[28114]: audit 2026-04-15T13:48:02.922414+0000 mgr.vm06.qbbldl (mgr.14229) 825 : audit [DBG] from='client.16394 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:04.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:03 vm06 bash[28114]: audit 2026-04-15T13:48:03.136485+0000 mgr.vm06.qbbldl (mgr.14229) 826 : audit [DBG] from='client.25569 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:04.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:03 vm06 bash[28114]: audit 2026-04-15T13:48:03.391211+0000 mon.vm06 (mon.0) 1252 : audit [DBG] from='client.? 192.168.123.106:0/301871593' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:48:04.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:03 vm09 bash[34466]: cluster 2026-04-15T13:48:02.588372+0000 mgr.vm06.qbbldl (mgr.14229) 824 : cluster [DBG] pgmap v457: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:48:04.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:03 vm09 bash[34466]: audit 2026-04-15T13:48:02.922414+0000 mgr.vm06.qbbldl (mgr.14229) 825 : audit [DBG] from='client.16394 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:04.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:03 vm09 bash[34466]: audit 2026-04-15T13:48:03.136485+0000 mgr.vm06.qbbldl (mgr.14229) 826 : audit [DBG] from='client.25569 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:04.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:03 vm09 bash[34466]: audit 2026-04-15T13:48:03.391211+0000 mon.vm06 (mon.0) 1252 : audit [DBG] from='client.? 192.168.123.106:0/301871593' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:48:06.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:05 vm06 bash[28114]: cluster 2026-04-15T13:48:04.588777+0000 mgr.vm06.qbbldl (mgr.14229) 827 : cluster [DBG] pgmap v458: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:48:06.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:05 vm09 bash[34466]: cluster 2026-04-15T13:48:04.588777+0000 mgr.vm06.qbbldl (mgr.14229) 827 : cluster [DBG] pgmap v458: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:48:08.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:07 vm06 bash[28114]: cluster 2026-04-15T13:48:06.589203+0000 mgr.vm06.qbbldl (mgr.14229) 828 : cluster [DBG] pgmap v459: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:48:08.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:07 vm09 bash[34466]: cluster 2026-04-15T13:48:06.589203+0000 mgr.vm06.qbbldl (mgr.14229) 828 : cluster [DBG] pgmap v459: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:48:08.614 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:48:08.803 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:48:08.803 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (11m) 6m ago 12m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:48:08.803 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (6m) 6m ago 12m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:48:08.803 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (79s) 58s ago 12m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:48:08.803 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 58s ago 12m - -
2026-04-15T13:48:09.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:08 vm06 bash[28114]: audit 2026-04-15T13:48:08.493361+0000 mon.vm06 (mon.0) 1253 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:48:09.043 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:48:09.044 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:48:09.044 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:48:09.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:08 vm09 bash[34466]: audit 2026-04-15T13:48:08.493361+0000 mon.vm06 (mon.0) 1253 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:48:10.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:09 vm06 bash[28114]: cluster 2026-04-15T13:48:08.589668+0000 mgr.vm06.qbbldl (mgr.14229) 829 : cluster [DBG] pgmap v460: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:48:10.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:09 vm06 bash[28114]: audit 2026-04-15T13:48:08.592906+0000 mgr.vm06.qbbldl (mgr.14229) 830 : audit [DBG] from='client.16406 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:10.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:09 vm06 bash[28114]: audit 2026-04-15T13:48:08.795896+0000 mgr.vm06.qbbldl (mgr.14229) 831 : audit [DBG] from='client.16410 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:10.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:09 vm06 bash[28114]: audit 2026-04-15T13:48:09.040070+0000 mon.vm06 (mon.0) 1254 : audit [DBG] from='client.? 192.168.123.106:0/1523201724' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:48:10.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:09 vm09 bash[34466]: cluster 2026-04-15T13:48:08.589668+0000 mgr.vm06.qbbldl (mgr.14229) 829 : cluster [DBG] pgmap v460: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:48:10.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:09 vm09 bash[34466]: audit 2026-04-15T13:48:08.592906+0000 mgr.vm06.qbbldl (mgr.14229) 830 : audit [DBG] from='client.16406 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:10.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:09 vm09 bash[34466]: audit 2026-04-15T13:48:08.795896+0000 mgr.vm06.qbbldl (mgr.14229) 831 : audit [DBG] from='client.16410 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:10.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:09 vm09 bash[34466]: audit 2026-04-15T13:48:09.040070+0000 mon.vm06 (mon.0) 1254 : audit [DBG] from='client.? 192.168.123.106:0/1523201724' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:48:11.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:10 vm09 bash[34466]: audit 2026-04-15T13:48:10.601490+0000 mon.vm06 (mon.0) 1255 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:48:11.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:10 vm06 bash[28114]: audit 2026-04-15T13:48:10.601490+0000 mon.vm06 (mon.0) 1255 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:48:12.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:11 vm09 bash[34466]: cluster 2026-04-15T13:48:10.590121+0000 mgr.vm06.qbbldl (mgr.14229) 832 : cluster [DBG] pgmap v461: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:48:12.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:11 vm09 bash[34466]: audit 2026-04-15T13:48:10.967419+0000 mon.vm06 (mon.0) 1256 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:48:12.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:11 vm09 bash[34466]: audit 2026-04-15T13:48:10.967965+0000 mon.vm06 (mon.0) 1257 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:48:12.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:11 vm09 bash[34466]: cluster 2026-04-15T13:48:10.968881+0000 mgr.vm06.qbbldl (mgr.14229) 833 : cluster [DBG] pgmap v462: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s
2026-04-15T13:48:12.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:11 vm09 bash[34466]: cluster 2026-04-15T13:48:10.968992+0000 mgr.vm06.qbbldl (mgr.14229) 834 : cluster [DBG] pgmap v463: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s
2026-04-15T13:48:12.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:11 vm09 bash[34466]: audit 2026-04-15T13:48:10.973386+0000 mon.vm06 (mon.0) 1258 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:48:12.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:11 vm09 bash[34466]: audit 2026-04-15T13:48:10.974711+0000 mon.vm06 (mon.0) 1259 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:48:12.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:11 vm06 bash[28114]: cluster 2026-04-15T13:48:10.590121+0000 mgr.vm06.qbbldl (mgr.14229) 832 : cluster [DBG] pgmap v461: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:48:12.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:11 vm06 bash[28114]: audit 2026-04-15T13:48:10.967419+0000 mon.vm06 (mon.0) 1256 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:48:12.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:11 vm06 bash[28114]: audit 2026-04-15T13:48:10.967965+0000 mon.vm06 (mon.0) 1257 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:48:12.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:11 vm06 bash[28114]: cluster 2026-04-15T13:48:10.968881+0000 mgr.vm06.qbbldl (mgr.14229) 833 : cluster [DBG] pgmap v462: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s
2026-04-15T13:48:12.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:11 vm06 bash[28114]: cluster 2026-04-15T13:48:10.968992+0000 mgr.vm06.qbbldl (mgr.14229) 834 : cluster [DBG] pgmap v463: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s
2026-04-15T13:48:12.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:11 vm06 bash[28114]: audit 2026-04-15T13:48:10.973386+0000 mon.vm06 (mon.0) 1258 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:48:12.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:11 vm06 bash[28114]: audit 2026-04-15T13:48:10.974711+0000 mon.vm06 (mon.0) 1259 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:48:14.269 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:48:14.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:14 vm09 bash[34466]: cluster 2026-04-15T13:48:12.969338+0000 mgr.vm06.qbbldl (mgr.14229) 835 : cluster [DBG] pgmap v464: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s
2026-04-15T13:48:14.463 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:48:14.463 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (11m) 6m ago 12m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:48:14.463 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (6m) 6m ago 12m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:48:14.463 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (84s) 64s ago 12m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:48:14.464 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 64s ago 12m - -
2026-04-15T13:48:14.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:14 vm06 bash[28114]: cluster 2026-04-15T13:48:12.969338+0000 mgr.vm06.qbbldl (mgr.14229) 835 : cluster [DBG] pgmap v464: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s
2026-04-15T13:48:14.698 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:48:14.699 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:48:14.699 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:48:15.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:15 vm09 bash[34466]: audit 2026-04-15T13:48:14.247483+0000 mgr.vm06.qbbldl (mgr.14229) 836 : audit [DBG] from='client.16418 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:15.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:15 vm09 bash[34466]: audit 2026-04-15T13:48:14.455913+0000 mgr.vm06.qbbldl (mgr.14229) 837 : audit [DBG] from='client.16422 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:15.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:15 vm09 bash[34466]: audit 2026-04-15T13:48:14.694821+0000 mon.vm06 (mon.0) 1260 : audit [DBG] from='client.? 192.168.123.106:0/2890904655' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:48:15.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:15 vm06 bash[28114]: audit 2026-04-15T13:48:14.247483+0000 mgr.vm06.qbbldl (mgr.14229) 836 : audit [DBG] from='client.16418 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:15.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:15 vm06 bash[28114]: audit 2026-04-15T13:48:14.455913+0000 mgr.vm06.qbbldl (mgr.14229) 837 : audit [DBG] from='client.16422 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:15.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:15 vm06 bash[28114]: audit 2026-04-15T13:48:14.694821+0000 mon.vm06 (mon.0) 1260 : audit [DBG] from='client.? 192.168.123.106:0/2890904655' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:48:16.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:16 vm09 bash[34466]: cluster 2026-04-15T13:48:14.969785+0000 mgr.vm06.qbbldl (mgr.14229) 838 : cluster [DBG] pgmap v465: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s
2026-04-15T13:48:16.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:16 vm06 bash[28114]: cluster 2026-04-15T13:48:14.969785+0000 mgr.vm06.qbbldl (mgr.14229) 838 : cluster [DBG] pgmap v465: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s
2026-04-15T13:48:18.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:18 vm09 bash[34466]: cluster 2026-04-15T13:48:16.970232+0000 mgr.vm06.qbbldl (mgr.14229) 839 : cluster [DBG] pgmap v466: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s
2026-04-15T13:48:18.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:18 vm06 bash[28114]: cluster 2026-04-15T13:48:16.970232+0000 mgr.vm06.qbbldl (mgr.14229) 839 : cluster [DBG] pgmap v466: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s
2026-04-15T13:48:19.913 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:48:20.115 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:48:20.115 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (12m) 6m ago 12m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:48:20.115 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (6m) 6m ago 12m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:48:20.115 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (90s) 69s ago 12m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:48:20.115 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 69s ago 12m - -
2026-04-15T13:48:20.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:20 vm09 bash[34466]: cluster 2026-04-15T13:48:18.970776+0000 mgr.vm06.qbbldl (mgr.14229) 840 : cluster [DBG] pgmap v467: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:48:20.370 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:48:20.370 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:48:20.370 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:48:20.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:20 vm06 bash[28114]: cluster 2026-04-15T13:48:18.970776+0000 mgr.vm06.qbbldl (mgr.14229) 840 : cluster [DBG] pgmap v467: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:48:21.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:21 vm09 bash[34466]: audit 2026-04-15T13:48:19.891092+0000 mgr.vm06.qbbldl (mgr.14229) 841 : audit [DBG] from='client.16430 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:21.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:21 vm09 bash[34466]: audit 2026-04-15T13:48:20.107587+0000 mgr.vm06.qbbldl (mgr.14229) 842 : audit [DBG] from='client.16434 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:21.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:21 vm09 bash[34466]: audit 2026-04-15T13:48:20.366354+0000 mon.vm06 (mon.0) 1261 : audit [DBG] from='client.? 192.168.123.106:0/1224549278' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:48:21.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:21 vm06 bash[28114]: audit 2026-04-15T13:48:19.891092+0000 mgr.vm06.qbbldl (mgr.14229) 841 : audit [DBG] from='client.16430 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:21.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:21 vm06 bash[28114]: audit 2026-04-15T13:48:20.107587+0000 mgr.vm06.qbbldl (mgr.14229) 842 : audit [DBG] from='client.16434 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:21.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:21 vm06 bash[28114]: audit 2026-04-15T13:48:20.366354+0000 mon.vm06 (mon.0) 1261 : audit [DBG] from='client.? 192.168.123.106:0/1224549278' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:48:22.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:22 vm09 bash[34466]: cluster 2026-04-15T13:48:20.971311+0000 mgr.vm06.qbbldl (mgr.14229) 843 : cluster [DBG] pgmap v468: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:48:22.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:22 vm06 bash[28114]: cluster 2026-04-15T13:48:20.971311+0000 mgr.vm06.qbbldl (mgr.14229) 843 : cluster [DBG] pgmap v468: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:48:23.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:23 vm09 bash[34466]: cluster 2026-04-15T13:48:22.971729+0000 mgr.vm06.qbbldl (mgr.14229) 844 : cluster [DBG] pgmap v469: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:48:23.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:23 vm09 bash[34466]: audit 2026-04-15T13:48:23.493896+0000 mon.vm06 (mon.0) 1262 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:48:24.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:23 vm06 bash[28114]: cluster 2026-04-15T13:48:22.971729+0000 mgr.vm06.qbbldl (mgr.14229) 844 : cluster [DBG] pgmap v469: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:48:24.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:23 vm06 bash[28114]: audit 2026-04-15T13:48:23.493896+0000 mon.vm06 (mon.0) 1262 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:48:25.608 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:48:25.817 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:48:25.817 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (12m) 6m ago 12m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:48:25.817 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (6m) 6m ago 12m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:48:25.817 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (96s) 75s ago 12m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:48:25.817 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 75s ago 12m - -
2026-04-15T13:48:26.078 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:48:26.079 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:48:26.079 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:48:26.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:26 vm09 bash[34466]: cluster 2026-04-15T13:48:24.972202+0000 mgr.vm06.qbbldl (mgr.14229) 845 : cluster [DBG] pgmap v470: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:48:26.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:26 vm06 bash[28114]: cluster 2026-04-15T13:48:24.972202+0000 mgr.vm06.qbbldl (mgr.14229) 845 : cluster [DBG] pgmap v470: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:48:27.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:27 vm09 bash[34466]: audit 2026-04-15T13:48:25.584930+0000 mgr.vm06.qbbldl (mgr.14229) 846 : audit [DBG] from='client.16442 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:27.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:27 vm09 bash[34466]: audit 2026-04-15T13:48:25.810146+0000 mgr.vm06.qbbldl (mgr.14229) 847 : audit [DBG] from='client.16446 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:27.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:27 vm09 bash[34466]: audit 2026-04-15T13:48:26.074685+0000 mon.vm06 (mon.0) 1263 : audit [DBG] from='client.? 192.168.123.106:0/1664458440' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:48:27.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:27 vm06 bash[28114]: audit 2026-04-15T13:48:25.584930+0000 mgr.vm06.qbbldl (mgr.14229) 846 : audit [DBG] from='client.16442 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:27.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:27 vm06 bash[28114]: audit 2026-04-15T13:48:25.810146+0000 mgr.vm06.qbbldl (mgr.14229) 847 : audit [DBG] from='client.16446 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:27.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:27 vm06 bash[28114]: audit 2026-04-15T13:48:26.074685+0000 mon.vm06 (mon.0) 1263 : audit [DBG] from='client.? 192.168.123.106:0/1664458440' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:48:28.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:28 vm09 bash[34466]: cluster 2026-04-15T13:48:26.972742+0000 mgr.vm06.qbbldl (mgr.14229) 848 : cluster [DBG] pgmap v471: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:48:28.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:28 vm06 bash[28114]: cluster 2026-04-15T13:48:26.972742+0000 mgr.vm06.qbbldl (mgr.14229) 848 : cluster [DBG] pgmap v471: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:48:30.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:30 vm06 bash[28114]: cluster 2026-04-15T13:48:28.973187+0000 mgr.vm06.qbbldl (mgr.14229) 849 : cluster [DBG] pgmap v472: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:48:30.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:30 vm09 bash[34466]: cluster 2026-04-15T13:48:28.973187+0000 mgr.vm06.qbbldl (mgr.14229) 849 : cluster [DBG] pgmap v472: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:48:31.310 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:48:31.534 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:48:31.534 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (12m) 6m ago 13m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:48:31.534 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (6m) 6m ago 13m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:48:31.534 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (101s) 81s ago 13m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:48:31.534 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 81s ago 13m - -
2026-04-15T13:48:31.801 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:48:31.802 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:48:31.802 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:48:32.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:32 vm06 bash[28114]: cluster 2026-04-15T13:48:30.973679+0000 mgr.vm06.qbbldl (mgr.14229) 850 : cluster [DBG] pgmap v473: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:48:32.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:32 vm06 bash[28114]: audit 2026-04-15T13:48:31.285411+0000 mgr.vm06.qbbldl (mgr.14229) 851 : audit [DBG] from='client.16454 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:32.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:32 vm06 bash[28114]: audit 2026-04-15T13:48:31.797616+0000 mon.vm06 (mon.0) 1264 : audit [DBG] from='client.? 192.168.123.106:0/3752395912' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:48:32.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:32 vm09 bash[34466]: cluster 2026-04-15T13:48:30.973679+0000 mgr.vm06.qbbldl (mgr.14229) 850 : cluster [DBG] pgmap v473: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:48:32.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:32 vm09 bash[34466]: audit 2026-04-15T13:48:31.285411+0000 mgr.vm06.qbbldl (mgr.14229) 851 : audit [DBG] from='client.16454 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:32.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:32 vm09 bash[34466]: audit 2026-04-15T13:48:31.797616+0000 mon.vm06 (mon.0) 1264 : audit [DBG] from='client.? 192.168.123.106:0/3752395912' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:48:33.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:33 vm06 bash[28114]: audit 2026-04-15T13:48:31.526788+0000 mgr.vm06.qbbldl (mgr.14229) 852 : audit [DBG] from='client.16458 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:33.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:33 vm09 bash[34466]: audit 2026-04-15T13:48:31.526788+0000 mgr.vm06.qbbldl (mgr.14229) 852 : audit [DBG] from='client.16458 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:34 vm06 bash[28114]: cluster 2026-04-15T13:48:32.974116+0000 mgr.vm06.qbbldl (mgr.14229) 853 : cluster [DBG] pgmap v474: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:48:34.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:34 vm09 bash[34466]: cluster 2026-04-15T13:48:32.974116+0000 mgr.vm06.qbbldl (mgr.14229) 853 : cluster [DBG] pgmap v474: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:48:36.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:36 vm06 bash[28114]: cluster 2026-04-15T13:48:34.974595+0000 mgr.vm06.qbbldl (mgr.14229) 854 : cluster [DBG] pgmap v475: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:36 vm09 bash[34466]: cluster 2026-04-15T13:48:34.974595+0000 mgr.vm06.qbbldl (mgr.14229) 854 : cluster [DBG] pgmap v475: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:48:36.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:36 vm09 bash[34466]: cluster 2026-04-15T13:48:34.974595+0000 mgr.vm06.qbbldl (mgr.14229) 854 : cluster [DBG] pgmap v475: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:48:37.021 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop 2026-04-15T13:48:37.215 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:48:37.215 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (12m) 6m ago 13m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:48:37.215 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (6m) 6m ago 13m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:48:37.215 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (107s) 87s ago 13m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111 2026-04-15T13:48:37.215 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 87s ago 13m - - 2026-04-15T13:48:37.452 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:48:37.452 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:48:37.452 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state 2026-04-15T13:48:37.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:37 vm06 bash[28114]: audit 2026-04-15T13:48:37.448157+0000 mon.vm06 (mon.0) 1265 : audit [DBG] from='client.? 192.168.123.106:0/214087005' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:48:37.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:37 vm06 bash[28114]: audit 2026-04-15T13:48:37.448157+0000 mon.vm06 (mon.0) 1265 : audit [DBG] from='client.? 192.168.123.106:0/214087005' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:48:37.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:37 vm09 bash[34466]: audit 2026-04-15T13:48:37.448157+0000 mon.vm06 (mon.0) 1265 : audit [DBG] from='client.? 192.168.123.106:0/214087005' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:48:37.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:37 vm09 bash[34466]: audit 2026-04-15T13:48:37.448157+0000 mon.vm06 (mon.0) 1265 : audit [DBG] from='client.? 
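The block above is one iteration of the run's wait loop: it polls roughly every five to six seconds for rgw.foo.vm09.pxnsqu to report a stopped status, printing "Waiting for rgw.foo.vm09.pxnsqu to stop" and dumping ceph orch ps --daemon-type rgw plus ceph health detail on every miss. A minimal Python sketch of such a poll loop (the five-second interval and 300-second budget are assumptions read off the timestamps here, not the job's actual script):

    import subprocess
    import time

    def wait_for_stop(daemon: str, timeout: float = 300.0, interval: float = 5.0) -> bool:
        """Poll `ceph orch ps` until `daemon` shows a 'stopped' status; dump
        diagnostics on every miss, mirroring the log output above."""
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            ps = subprocess.run(["ceph", "orch", "ps"],
                                capture_output=True, text=True).stdout
            # first column of each row is the daemon NAME
            if any(line.split()[:1] == [daemon] and "stopped" in line
                   for line in ps.splitlines()):
                return True
            print(f"Waiting for {daemon} to stop")
            subprocess.run(["ceph", "orch", "ps", "--daemon-type", "rgw"])
            subprocess.run(["ceph", "health", "detail"])
            time.sleep(interval)
        return False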
2026-04-15T13:48:38.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:38 vm06 bash[28114]: cluster 2026-04-15T13:48:36.975146+0000 mgr.vm06.qbbldl (mgr.14229) 855 : cluster [DBG] pgmap v476: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:48:38.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:38 vm06 bash[28114]: audit 2026-04-15T13:48:36.996530+0000 mgr.vm06.qbbldl (mgr.14229) 856 : audit [DBG] from='client.16466 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:38.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:38 vm06 bash[28114]: audit 2026-04-15T13:48:37.208584+0000 mgr.vm06.qbbldl (mgr.14229) 857 : audit [DBG] from='client.16470 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:39.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:39 vm06 bash[28114]: audit 2026-04-15T13:48:38.494010+0000 mon.vm06 (mon.0) 1266 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:48:39.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:39 vm06 bash[28114]: cluster 2026-04-15T13:48:38.975642+0000 mgr.vm06.qbbldl (mgr.14229) 858 : cluster [DBG] pgmap v477: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:48:42.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:42 vm09 bash[34466]: cluster 2026-04-15T13:48:40.976164+0000 mgr.vm06.qbbldl (mgr.14229) 859 : cluster [DBG] pgmap v478: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:48:42.676 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:48:42.878 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:48:42.878 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (12m) 6m ago 13m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:48:42.878 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (7m) 6m ago 13m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:48:42.878 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (113s) 92s ago 13m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:48:42.878 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 92s ago 13m - -
2026-04-15T13:48:43.128 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:48:43.129 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:48:43.129 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:48:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:44 vm09 bash[34466]: audit 2026-04-15T13:48:42.651607+0000 mgr.vm06.qbbldl (mgr.14229) 860 : audit [DBG] from='client.16478 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:44 vm09 bash[34466]: audit 2026-04-15T13:48:42.869630+0000 mgr.vm06.qbbldl (mgr.14229) 861 : audit [DBG] from='client.16482 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:44 vm09 bash[34466]: cluster 2026-04-15T13:48:42.976595+0000 mgr.vm06.qbbldl (mgr.14229) 862 : cluster [DBG] pgmap v479: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:48:44.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:44 vm09 bash[34466]: audit 2026-04-15T13:48:43.124730+0000 mon.vm06 (mon.0) 1267 : audit [DBG] from='client.? 192.168.123.106:0/2448510597' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:48:46.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:46 vm09 bash[34466]: cluster 2026-04-15T13:48:44.977083+0000 mgr.vm06.qbbldl (mgr.14229) 863 : cluster [DBG] pgmap v480: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:48:48.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:48 vm09 bash[34466]: cluster 2026-04-15T13:48:46.977689+0000 mgr.vm06.qbbldl (mgr.14229) 864 : cluster [DBG] pgmap v481: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:48:48.375 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:48:48.601 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:48:48.601 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (12m) 7m ago 13m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:48:48.601 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (7m) 7m ago 13m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:48:48.601 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (119s) 98s ago 13m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:48:48.601 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 98s ago 13m - -
2026-04-15T13:48:48.852 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:48:48.852 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:48:48.852 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:48:49.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:49 vm06 bash[28114]: audit 2026-04-15T13:48:48.350407+0000 mgr.vm06.qbbldl (mgr.14229) 865 : audit [DBG] from='client.16490 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:49.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:49 vm06 bash[28114]: audit 2026-04-15T13:48:48.847930+0000 mon.vm06 (mon.0) 1268 : audit [DBG] from='client.? 192.168.123.106:0/490657269' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:48:50.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:50 vm09 bash[34466]: audit 2026-04-15T13:48:48.594592+0000 mgr.vm06.qbbldl (mgr.14229) 866 : audit [DBG] from='client.16494 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:50.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:48:50 vm09 bash[34466]: cluster 2026-04-15T13:48:48.978147+0000 mgr.vm06.qbbldl (mgr.14229) 867 : cluster [DBG] pgmap v482: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:48:52.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:52 vm06 bash[28114]: cluster 2026-04-15T13:48:50.978603+0000 mgr.vm06.qbbldl (mgr.14229) 868 : cluster [DBG] pgmap v483: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:48:54.078 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:48:54.293 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:48:54.293 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (12m) 7m ago 13m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:48:54.293 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (7m) 7m ago 13m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:48:54.293 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (2m) 104s ago 13m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:48:54.293 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 104s ago 13m - -
2026-04-15T13:48:54.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:54 vm06 bash[28114]: cluster 2026-04-15T13:48:52.979125+0000 mgr.vm06.qbbldl (mgr.14229) 869 : cluster [DBG] pgmap v484: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:48:54.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:54 vm06 bash[28114]: audit 2026-04-15T13:48:53.494207+0000 mon.vm06 (mon.0) 1269 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:48:54.550 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:48:54.550 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:48:54.550 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:48:55.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:55 vm06 bash[28114]: audit 2026-04-15T13:48:54.054747+0000 mgr.vm06.qbbldl (mgr.14229) 870 : audit [DBG] from='client.16502 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:55.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:55 vm06 bash[28114]: audit 2026-04-15T13:48:54.284662+0000 mgr.vm06.qbbldl (mgr.14229) 871 : audit [DBG] from='client.25645 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:48:55.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:55 vm06 bash[28114]: audit 2026-04-15T13:48:54.546119+0000 mon.vm06 (mon.0) 1270 : audit [DBG] from='client.? 192.168.123.106:0/1194437285' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:48:56.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:56 vm06 bash[28114]: cluster 2026-04-15T13:48:54.979701+0000 mgr.vm06.qbbldl (mgr.14229) 872 : cluster [DBG] pgmap v485: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:48:58.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:48:58 vm06 bash[28114]: cluster 2026-04-15T13:48:56.980316+0000 mgr.vm06.qbbldl (mgr.14229) 873 : cluster [DBG] pgmap v486: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:48:59.762 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:48:59.963 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:48:59.963 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (12m) 7m ago 13m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:48:59.963 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (7m) 7m ago 13m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:48:59.963 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (2m) 109s ago 13m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:48:59.963 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 109s ago 13m - -
2026-04-15T13:49:00.205 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:49:00.205 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:49:00.205 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:49:00.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:00 vm06 bash[28114]: cluster 2026-04-15T13:48:58.980933+0000 mgr.vm06.qbbldl (mgr.14229) 874 : cluster [DBG] pgmap v487: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:49:01.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:01 vm06 bash[28114]: audit 2026-04-15T13:48:59.740228+0000 mgr.vm06.qbbldl (mgr.14229) 875 : audit [DBG] from='client.16514 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:01.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:01 vm06 bash[28114]: audit 2026-04-15T13:48:59.955437+0000 mgr.vm06.qbbldl (mgr.14229) 876 : audit [DBG] from='client.16518 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:01.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:01 vm06 bash[28114]: audit 2026-04-15T13:49:00.201432+0000 mon.vm06 (mon.0) 1271 : audit [DBG] from='client.? 192.168.123.106:0/2480137813' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
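Each poll is visible twice over: once as teuthology stdout, and again in the mon journals as audit records ("orch ps" dispatched to the mgr via mon-mgr, "health detail" dispatched to mon.vm06). For scripting against this state, the same table is available as JSON; a small sketch (assuming the status_desc field that ceph orch ps --format json normally carries) that looks up one daemon's state without grepping the text table:

    import json
    import subprocess

    def daemon_status(daemon: str) -> str | None:
        """Return the status string ('running', 'stopped', 'error', ...) for one
        cephadm daemon, or None if the orchestrator does not list it."""
        out = subprocess.run(
            ["ceph", "orch", "ps", "--daemon-type", "rgw", "--format", "json"],
            capture_output=True, text=True, check=True,
        ).stdout
        for d in json.loads(out):
            # NAME in the table above is "<daemon_type>.<daemon_id>"
            if f"{d['daemon_type']}.{d['daemon_id']}" == daemon:
                return d.get("status_desc")
        return None

For the state captured above, daemon_status('rgw.foo.vm09.pxnsqu') would return 'error' while the other three rgw daemons report 'running'.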
2026-04-15T13:49:02.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:02 vm06 bash[28114]: cluster 2026-04-15T13:49:00.981459+0000 mgr.vm06.qbbldl (mgr.14229) 877 : cluster [DBG] pgmap v488: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:49:04.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:04 vm06 bash[28114]: cluster 2026-04-15T13:49:02.981919+0000 mgr.vm06.qbbldl (mgr.14229) 878 : cluster [DBG] pgmap v489: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:49:05.423 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:49:05.610 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:49:05.610 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (12m) 7m ago 13m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:49:05.610 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (7m) 7m ago 13m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:49:05.610 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (2m) 115s ago 13m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:49:05.610 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 115s ago 13m - -
2026-04-15T13:49:05.865 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:49:05.865 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:49:05.865 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:49:06.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:06 vm06 bash[28114]: cluster 2026-04-15T13:49:04.982304+0000 mgr.vm06.qbbldl (mgr.14229) 879 : cluster [DBG] pgmap v490: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:49:06.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:06 vm06 bash[28114]: audit 2026-04-15T13:49:05.401044+0000 mgr.vm06.qbbldl (mgr.14229) 880 : audit [DBG] from='client.16526 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:06.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:06 vm06 bash[28114]: audit 2026-04-15T13:49:05.861623+0000 mon.vm06 (mon.0) 1272 : audit [DBG] from='client.? 192.168.123.106:0/720758717' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:49:07.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:07 vm06 bash[28114]: audit 2026-04-15T13:49:05.602818+0000 mgr.vm06.qbbldl (mgr.14229) 881 : audit [DBG] from='client.16530 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:08.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:08 vm06 bash[28114]: cluster 2026-04-15T13:49:06.982881+0000 mgr.vm06.qbbldl (mgr.14229) 882 : cluster [DBG] pgmap v491: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:49:09.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:09 vm06 bash[28114]: audit 2026-04-15T13:49:08.494346+0000 mon.vm06 (mon.0) 1273 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:49:10.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:10 vm06 bash[28114]: cluster 2026-04-15T13:49:08.983379+0000 mgr.vm06.qbbldl (mgr.14229) 883 : cluster [DBG] pgmap v492: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:49:11.083 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:49:11.291 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:49:11.291 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (12m) 7m ago 13m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:49:11.291 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (7m) 7m ago 13m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:49:11.291 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (2m) 2m ago 13m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:49:11.291 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 2m ago 13m - -
2026-04-15T13:49:11.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:11 vm06 bash[28114]: audit 2026-04-15T13:49:10.988885+0000 mon.vm06 (mon.0) 1274 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:49:11.561 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:49:11.561 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:49:11.561 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
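By this point rgw.foo.vm09.pxnsqu has sat in 'error' for two minutes while the loop matches only on 'stopped', so the same CEPHADM_FAILED_DAEMON warning is re-reported on every pass. The warning is also machine-readable; a sketch (assuming the standard JSON shape of ceph health detail --format json, with per-check "detail" message lists) that pulls out the detail seen here:

    import json
    import subprocess

    def failed_cephadm_daemons() -> list[str]:
        """Return the detail messages behind CEPHADM_FAILED_DAEMON, if the
        check is currently raised; an empty list means no failed daemons."""
        out = subprocess.run(
            ["ceph", "health", "detail", "--format", "json"],
            capture_output=True, text=True, check=True,
        ).stdout
        check = json.loads(out).get("checks", {}).get("CEPHADM_FAILED_DAEMON")
        if not check:
            return []
        return [d["message"] for d in check.get("detail", [])]

Against the cluster state above this would return ['daemon rgw.foo.vm09.pxnsqu on vm09 is in error state'].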
2026-04-15T13:49:11.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:11 vm09 bash[34466]: audit 2026-04-15T13:49:10.988885+0000 mon.vm06 (mon.0) 1274 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:49:12.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:12 vm06 bash[28114]: cluster 2026-04-15T13:49:10.983863+0000 mgr.vm06.qbbldl (mgr.14229) 884 : cluster [DBG] pgmap v493: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:49:12.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:12 vm06 bash[28114]: audit 2026-04-15T13:49:11.055670+0000 mgr.vm06.qbbldl (mgr.14229) 885 : audit [DBG] from='client.16538 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:12.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:12 vm06 bash[28114]: audit 2026-04-15T13:49:11.283755+0000 mgr.vm06.qbbldl (mgr.14229) 886 : audit [DBG] from='client.16542 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:12.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:12 vm06 bash[28114]: audit 2026-04-15T13:49:11.408592+0000 mon.vm06 (mon.0) 1275 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:49:12.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:12 vm06 bash[28114]: audit 2026-04-15T13:49:11.409563+0000 mon.vm06 (mon.0) 1276 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:49:12.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:12 vm06 bash[28114]: cluster 2026-04-15T13:49:11.411107+0000 mgr.vm06.qbbldl (mgr.14229) 887 : cluster [DBG] pgmap v494: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 196 B/s wr, 0 op/s
2026-04-15T13:49:12.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:12 vm06 bash[28114]: cluster 2026-04-15T13:49:11.411363+0000 mgr.vm06.qbbldl (mgr.14229) 888 : cluster [DBG] pgmap v495: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 242 B/s wr, 0 op/s
2026-04-15T13:49:12.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:12 vm06 bash[28114]: audit 2026-04-15T13:49:11.418486+0000 mon.vm06 (mon.0) 1277 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:49:12.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:12 vm06 bash[28114]: audit 2026-04-15T13:49:11.420512+0000 mon.vm06 (mon.0) 1278 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:49:12.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:12 vm06 bash[28114]: audit 2026-04-15T13:49:11.556639+0000 mon.vm06 (mon.0) 1279 : audit [DBG] from='client.? 192.168.123.106:0/2627095323' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:49:12.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:12 vm09 bash[34466]: cluster 2026-04-15T13:49:10.983863+0000 mgr.vm06.qbbldl (mgr.14229) 884 : cluster [DBG] pgmap v493: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:49:12.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:12 vm09 bash[34466]: audit 2026-04-15T13:49:11.055670+0000 mgr.vm06.qbbldl (mgr.14229) 885 : audit [DBG] from='client.16538 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:12.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:12 vm09 bash[34466]: audit 2026-04-15T13:49:11.283755+0000 mgr.vm06.qbbldl (mgr.14229) 886 : audit [DBG] from='client.16542 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:12.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:12 vm09 bash[34466]: audit 2026-04-15T13:49:11.408592+0000 mon.vm06 (mon.0) 1275 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:49:12.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:12 vm09 bash[34466]: audit 2026-04-15T13:49:11.409563+0000 mon.vm06 (mon.0) 1276 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:49:12.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:12 vm09 bash[34466]: cluster 2026-04-15T13:49:11.411107+0000 mgr.vm06.qbbldl (mgr.14229) 887 : cluster [DBG] pgmap v494: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 196 B/s wr, 0 op/s
2026-04-15T13:49:12.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:12 vm09 bash[34466]: cluster 2026-04-15T13:49:11.411363+0000 mgr.vm06.qbbldl (mgr.14229) 888 : cluster [DBG] pgmap v495: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 242 B/s wr, 0 op/s
2026-04-15T13:49:12.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:12 vm09 bash[34466]: audit 2026-04-15T13:49:11.418486+0000 mon.vm06 (mon.0) 1277 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:49:12.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:12 vm09 bash[34466]: audit 2026-04-15T13:49:11.420512+0000 mon.vm06 (mon.0) 1278 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:49:12.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:12 vm09 bash[34466]: audit 2026-04-15T13:49:11.556639+0000 mon.vm06 (mon.0) 1279 : audit [DBG] from='client.? 192.168.123.106:0/2627095323' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:49:14.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:14 vm06 bash[28114]: cluster 2026-04-15T13:49:13.411760+0000 mgr.vm06.qbbldl (mgr.14229) 889 : cluster [DBG] pgmap v496: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 242 B/s wr, 0 op/s
2026-04-15T13:49:14.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:14 vm09 bash[34466]: cluster 2026-04-15T13:49:13.411760+0000 mgr.vm06.qbbldl (mgr.14229) 889 : cluster [DBG] pgmap v496: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 242 B/s wr, 0 op/s
2026-04-15T13:49:15.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:15 vm09 bash[34466]: cluster 2026-04-15T13:49:15.412312+0000 mgr.vm06.qbbldl (mgr.14229) 890 : cluster [DBG] pgmap v497: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 242 B/s wr, 0 op/s
2026-04-15T13:49:16.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:15 vm06 bash[28114]: cluster 2026-04-15T13:49:15.412312+0000 mgr.vm06.qbbldl (mgr.14229) 890 : cluster [DBG] pgmap v497: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 242 B/s wr, 0 op/s
2026-04-15T13:49:16.801 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:49:17.009 INFO:teuthology.orchestra.run.vm06.stdout:NAME                 HOST  PORTS   STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION                 IMAGE ID      CONTAINER ID
2026-04-15T13:49:17.009 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug  vm06  *:8001  running (12m)  7m ago     13m  117M     -        20.2.0-19-g7ec4401a095  b4cb326006c0  83775e5cd19f
2026-04-15T13:49:17.009 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd  vm06  *:8000  running (7m)   7m ago     13m  92.5M    -        20.2.0-19-g7ec4401a095  b4cb326006c0  57a0cf0484de
2026-04-15T13:49:17.009 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg  vm09  *:8000  running (2m)   2m ago     13m  94.6M    -        20.2.0-19-g7ec4401a095  b4cb326006c0  fa1d79d02111
2026-04-15T13:49:17.009 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu  vm09  *:8001  error          2m ago     13m  -        -
2026-04-15T13:49:17.273 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:49:17.274 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:49:17.274 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:49:17.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:17 vm09 bash[34466]: audit 2026-04-15T13:49:17.269564+0000 mon.vm06 (mon.0) 1280 : audit [DBG] from='client.? 192.168.123.106:0/901889435' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:49:17.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:17 vm06 bash[28114]: audit 2026-04-15T13:49:17.269564+0000 mon.vm06 (mon.0) 1280 : audit [DBG] from='client.? 192.168.123.106:0/901889435' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:49:18.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:18 vm09 bash[34466]: audit 2026-04-15T13:49:16.774554+0000 mgr.vm06.qbbldl (mgr.14229) 891 : audit [DBG] from='client.16550 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:18.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:18 vm09 bash[34466]: audit 2026-04-15T13:49:17.002252+0000 mgr.vm06.qbbldl (mgr.14229) 892 : audit [DBG] from='client.16554 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:18.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:18 vm09 bash[34466]: cluster 2026-04-15T13:49:17.412820+0000 mgr.vm06.qbbldl (mgr.14229) 893 : cluster [DBG] pgmap v498: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 242 B/s wr, 0 op/s
2026-04-15T13:49:18.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:18 vm06 bash[28114]: audit 2026-04-15T13:49:16.774554+0000 mgr.vm06.qbbldl (mgr.14229) 891 : audit [DBG] from='client.16550 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:18.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:18 vm06 bash[28114]: audit 2026-04-15T13:49:17.002252+0000 mgr.vm06.qbbldl (mgr.14229) 892 : audit [DBG] from='client.16554 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:18.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:18 vm06 bash[28114]: cluster 2026-04-15T13:49:17.412820+0000 mgr.vm06.qbbldl (mgr.14229) 893 : cluster [DBG] pgmap v498: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 121 B/s rd, 242 B/s wr, 0 op/s
2026-04-15T13:49:20.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:20 vm06 bash[28114]: cluster 2026-04-15T13:49:19.413346+0000 mgr.vm06.qbbldl (mgr.14229) 894 : cluster [DBG] pgmap v499: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:49:20.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:20 vm09 bash[34466]: cluster 2026-04-15T13:49:19.413346+0000 mgr.vm06.qbbldl (mgr.14229) 894 : cluster [DBG] pgmap v499: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:49:21.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:21 vm06 bash[28114]: cluster 2026-04-15T13:49:21.413783+0000 mgr.vm06.qbbldl (mgr.14229) 895 : cluster [DBG] pgmap v500: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:49:21.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:21 vm09 bash[34466]: cluster 2026-04-15T13:49:21.413783+0000 mgr.vm06.qbbldl (mgr.14229) 895 : cluster [DBG] pgmap v500: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:49:22.523 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:49:22.782 INFO:teuthology.orchestra.run.vm06.stdout:NAME                 HOST  PORTS   STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION                 IMAGE ID      CONTAINER ID
2026-04-15T13:49:22.782 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug  vm06  *:8001  running (13m)  7m ago     13m  117M     -        20.2.0-19-g7ec4401a095  b4cb326006c0  83775e5cd19f
2026-04-15T13:49:22.782 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd  vm06  *:8000  running (7m)   7m ago     13m  92.5M    -        20.2.0-19-g7ec4401a095  b4cb326006c0  57a0cf0484de
2026-04-15T13:49:22.782 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg  vm09  *:8000  running (2m)   2m ago     13m  94.6M    -        20.2.0-19-g7ec4401a095  b4cb326006c0  fa1d79d02111
2026-04-15T13:49:22.782 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu  vm09  *:8001  error          2m ago     13m  -        -
2026-04-15T13:49:23.032 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:49:23.032 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:49:23.032 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:49:23.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:23 vm09 bash[34466]: audit 2026-04-15T13:49:23.028511+0000 mon.vm06 (mon.0) 1281 : audit [DBG] from='client.? 192.168.123.106:0/2058201229' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:49:23.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:23 vm06 bash[28114]: audit 2026-04-15T13:49:23.028511+0000 mon.vm06 (mon.0) 1281 : audit [DBG] from='client.? 192.168.123.106:0/2058201229' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:49:24.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:24 vm09 bash[34466]: audit 2026-04-15T13:49:22.493193+0000 mgr.vm06.qbbldl (mgr.14229) 896 : audit [DBG] from='client.16562 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:24.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:24 vm09 bash[34466]: audit 2026-04-15T13:49:22.775208+0000 mgr.vm06.qbbldl (mgr.14229) 897 : audit [DBG] from='client.16566 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:24.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:24 vm09 bash[34466]: cluster 2026-04-15T13:49:23.414199+0000 mgr.vm06.qbbldl (mgr.14229) 898 : cluster [DBG] pgmap v501: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:49:24.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:24 vm09 bash[34466]: audit 2026-04-15T13:49:23.494583+0000 mon.vm06 (mon.0) 1282 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:49:24.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:24 vm06 bash[28114]: audit 2026-04-15T13:49:22.493193+0000 mgr.vm06.qbbldl (mgr.14229) 896 : audit [DBG] from='client.16562 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:24.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:24 vm06 bash[28114]: audit 2026-04-15T13:49:22.775208+0000 mgr.vm06.qbbldl (mgr.14229) 897 : audit [DBG] from='client.16566 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:24.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:24 vm06 bash[28114]: cluster 2026-04-15T13:49:23.414199+0000 mgr.vm06.qbbldl (mgr.14229) 898 : cluster [DBG] pgmap v501: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:49:24.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:24 vm06 bash[28114]: audit 2026-04-15T13:49:23.494583+0000 mon.vm06 (mon.0) 1282 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:49:25.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:25 vm09 bash[34466]: cluster 2026-04-15T13:49:25.414677+0000 mgr.vm06.qbbldl (mgr.14229) 899 : cluster [DBG] pgmap v502: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:49:26.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:25 vm06 bash[28114]: cluster 2026-04-15T13:49:25.414677+0000 mgr.vm06.qbbldl (mgr.14229) 899 : cluster [DBG] pgmap v502: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:49:27.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:27 vm09 bash[34466]: cluster 2026-04-15T13:49:27.415158+0000 mgr.vm06.qbbldl (mgr.14229) 900 : cluster [DBG] pgmap v503: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:49:28.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:27 vm06 bash[28114]: cluster 2026-04-15T13:49:27.415158+0000 mgr.vm06.qbbldl (mgr.14229) 900 : cluster [DBG] pgmap v503: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:49:28.248 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:49:28.450 INFO:teuthology.orchestra.run.vm06.stdout:NAME                 HOST  PORTS   STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION                 IMAGE ID      CONTAINER ID
2026-04-15T13:49:28.450 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug  vm06  *:8001  running (13m)  7m ago     13m  117M     -        20.2.0-19-g7ec4401a095  b4cb326006c0  83775e5cd19f
2026-04-15T13:49:28.450 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd  vm06  *:8000  running (7m)   7m ago     14m  92.5M    -        20.2.0-19-g7ec4401a095  b4cb326006c0  57a0cf0484de
2026-04-15T13:49:28.450 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg  vm09  *:8000  running (2m)   2m ago     14m  94.6M    -        20.2.0-19-g7ec4401a095  b4cb326006c0  fa1d79d02111
2026-04-15T13:49:28.450 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu  vm09  *:8001  error          2m ago     14m  -        -
2026-04-15T13:49:28.726 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:49:28.726 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:49:28.726 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:49:28.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:28 vm09 bash[34466]: audit 2026-04-15T13:49:28.226803+0000 mgr.vm06.qbbldl (mgr.14229) 901 : audit [DBG] from='client.16574 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:28.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:28 vm09 bash[34466]: audit 2026-04-15T13:49:28.442821+0000 mgr.vm06.qbbldl (mgr.14229) 902 : audit [DBG] from='client.16578 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:28.889 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:28 vm06 bash[28114]: audit 2026-04-15T13:49:28.226803+0000 mgr.vm06.qbbldl (mgr.14229) 901 : audit [DBG] from='client.16574 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:28.889 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:28 vm06 bash[28114]: audit 2026-04-15T13:49:28.442821+0000 mgr.vm06.qbbldl (mgr.14229) 902 : audit [DBG] from='client.16578 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:29.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:29 vm09 bash[34466]: audit 2026-04-15T13:49:28.722089+0000 mon.vm06 (mon.0) 1283 : audit [DBG] from='client.? 192.168.123.106:0/2744572066' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:49:29.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:29 vm09 bash[34466]: cluster 2026-04-15T13:49:29.415595+0000 mgr.vm06.qbbldl (mgr.14229) 903 : cluster [DBG] pgmap v504: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:49:30.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:29 vm06 bash[28114]: audit 2026-04-15T13:49:28.722089+0000 mon.vm06 (mon.0) 1283 : audit [DBG] from='client.? 192.168.123.106:0/2744572066' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:49:30.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:29 vm06 bash[28114]: cluster 2026-04-15T13:49:29.415595+0000 mgr.vm06.qbbldl (mgr.14229) 903 : cluster [DBG] pgmap v504: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:49:32.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:31 vm06 bash[28114]: cluster 2026-04-15T13:49:31.416059+0000 mgr.vm06.qbbldl (mgr.14229) 904 : cluster [DBG] pgmap v505: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:49:32.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:31 vm09 bash[34466]: cluster 2026-04-15T13:49:31.416059+0000 mgr.vm06.qbbldl (mgr.14229) 904 : cluster [DBG] pgmap v505: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:49:33.957 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:49:34.179 INFO:teuthology.orchestra.run.vm06.stdout:NAME                 HOST  PORTS   STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION                 IMAGE ID      CONTAINER ID
2026-04-15T13:49:34.179 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug  vm06  *:8001  running (13m)  7m ago     14m  117M     -        20.2.0-19-g7ec4401a095  b4cb326006c0  83775e5cd19f
2026-04-15T13:49:34.179 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd  vm06  *:8000  running (7m)   7m ago     14m  92.5M    -        20.2.0-19-g7ec4401a095  b4cb326006c0  57a0cf0484de
2026-04-15T13:49:34.179 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg  vm09  *:8000  running (2m)   2m ago     14m  94.6M    -        20.2.0-19-g7ec4401a095  b4cb326006c0  fa1d79d02111
2026-04-15T13:49:34.179 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu  vm09  *:8001  error          2m ago     14m  -        -
2026-04-15T13:49:34.441 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:49:34.442 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:49:34.442 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:49:34.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:34 vm06 bash[28114]: cluster 2026-04-15T13:49:33.416579+0000 mgr.vm06.qbbldl (mgr.14229) 905 : cluster [DBG] pgmap v506: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:49:34.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:34 vm06 bash[28114]: audit 2026-04-15T13:49:34.437617+0000 mon.vm06 (mon.0) 1284 : audit [DBG] from='client.? 192.168.123.106:0/2421605572' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:49:34.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:34 vm09 bash[34466]: cluster 2026-04-15T13:49:33.416579+0000 mgr.vm06.qbbldl (mgr.14229) 905 : cluster [DBG] pgmap v506: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:49:34.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:34 vm09 bash[34466]: audit 2026-04-15T13:49:34.437617+0000 mon.vm06 (mon.0) 1284 : audit [DBG] from='client.? 192.168.123.106:0/2421605572' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:49:35.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:35 vm06 bash[28114]: audit 2026-04-15T13:49:33.935992+0000 mgr.vm06.qbbldl (mgr.14229) 906 : audit [DBG] from='client.16586 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:35.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:35 vm06 bash[28114]: audit 2026-04-15T13:49:34.171190+0000 mgr.vm06.qbbldl (mgr.14229) 907 : audit [DBG] from='client.16590 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:35.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:35 vm06 bash[28114]: cluster 2026-04-15T13:49:35.417134+0000 mgr.vm06.qbbldl (mgr.14229) 908 : cluster [DBG] pgmap v507: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:49:35.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:35 vm09 bash[34466]: audit 2026-04-15T13:49:33.935992+0000 mgr.vm06.qbbldl (mgr.14229) 906 : audit [DBG] from='client.16586 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:35.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:35 vm09 bash[34466]: audit 2026-04-15T13:49:34.171190+0000 mgr.vm06.qbbldl (mgr.14229) 907 : audit [DBG] from='client.16590 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:35.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:35 vm09 bash[34466]: cluster 2026-04-15T13:49:35.417134+0000 mgr.vm06.qbbldl (mgr.14229) 908 : cluster [DBG] pgmap v507: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:49:38.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:38 vm06 bash[28114]: cluster 2026-04-15T13:49:37.417580+0000 mgr.vm06.qbbldl (mgr.14229) 909 : cluster [DBG] pgmap v508: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:49:38.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:38 vm09 bash[34466]: cluster 2026-04-15T13:49:37.417580+0000 mgr.vm06.qbbldl (mgr.14229) 909 : cluster [DBG] pgmap v508: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:49:39.661 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:49:39.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:39 vm06 bash[28114]: audit 2026-04-15T13:49:38.494899+0000 mon.vm06 (mon.0) 1285 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:49:39.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:39 vm06 bash[28114]: cluster 2026-04-15T13:49:39.418005+0000 mgr.vm06.qbbldl (mgr.14229) 910 : cluster [DBG] pgmap v509: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:49:39.856 INFO:teuthology.orchestra.run.vm06.stdout:NAME                 HOST  PORTS   STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION                 IMAGE ID      CONTAINER ID
2026-04-15T13:49:39.856 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug  vm06  *:8001  running (13m)  7m ago     14m  117M     -        20.2.0-19-g7ec4401a095  b4cb326006c0  83775e5cd19f
2026-04-15T13:49:39.856 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd  vm06  *:8000  running (8m)   7m ago     14m  92.5M    -        20.2.0-19-g7ec4401a095  b4cb326006c0  57a0cf0484de
2026-04-15T13:49:39.856 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg  vm09  *:8000  running (2m)   2m ago     14m  94.6M    -        20.2.0-19-g7ec4401a095  b4cb326006c0  fa1d79d02111
2026-04-15T13:49:39.856 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu  vm09  *:8001  error          2m ago     14m  -        -
2026-04-15T13:49:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:39 vm09 bash[34466]: audit 2026-04-15T13:49:38.494899+0000 mon.vm06 (mon.0) 1285 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:49:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:39 vm09 bash[34466]: cluster 2026-04-15T13:49:39.418005+0000 mgr.vm06.qbbldl (mgr.14229) 910 : cluster [DBG] pgmap v509: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:49:40.146 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:49:40.146 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:49:40.146 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:49:40.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:40 vm06 bash[28114]: audit 2026-04-15T13:49:39.638851+0000 mgr.vm06.qbbldl (mgr.14229) 911 : audit [DBG] from='client.16598 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:40.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:40 vm06 bash[28114]: audit 2026-04-15T13:49:39.848459+0000 mgr.vm06.qbbldl (mgr.14229) 912 : audit [DBG] from='client.16602 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:40.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:40 vm06 bash[28114]: audit 2026-04-15T13:49:40.142451+0000 mon.vm06 (mon.0) 1286 : audit [DBG] from='client.? 192.168.123.106:0/2982519629' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:49:40.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:40 vm09 bash[34466]: audit 2026-04-15T13:49:39.638851+0000 mgr.vm06.qbbldl (mgr.14229) 911 : audit [DBG] from='client.16598 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:40.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:40 vm09 bash[34466]: audit 2026-04-15T13:49:39.848459+0000 mgr.vm06.qbbldl (mgr.14229) 912 : audit [DBG] from='client.16602 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:40.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:40 vm09 bash[34466]: audit 2026-04-15T13:49:40.142451+0000 mon.vm06 (mon.0) 1286 : audit [DBG] from='client.? 192.168.123.106:0/2982519629' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:49:41.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:41 vm06 bash[28114]: cluster 2026-04-15T13:49:41.418468+0000 mgr.vm06.qbbldl (mgr.14229) 913 : cluster [DBG] pgmap v510: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:49:41.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:41 vm09 bash[34466]: cluster 2026-04-15T13:49:41.418468+0000 mgr.vm06.qbbldl (mgr.14229) 913 : cluster [DBG] pgmap v510: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:49:44.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:44 vm06 bash[28114]: cluster 2026-04-15T13:49:43.418996+0000 mgr.vm06.qbbldl (mgr.14229) 914 : cluster [DBG] pgmap v511: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:49:44.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:44 vm09 bash[34466]: cluster 2026-04-15T13:49:43.418996+0000 mgr.vm06.qbbldl (mgr.14229) 914 : cluster [DBG] pgmap v511: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:49:45.384 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:49:45.576 INFO:teuthology.orchestra.run.vm06.stdout:NAME                 HOST  PORTS   STATUS         REFRESHED  AGE  MEM USE  MEM LIM  VERSION                 IMAGE ID      CONTAINER ID
2026-04-15T13:49:45.577 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug  vm06  *:8001  running (13m)  8m ago     14m  117M     -        20.2.0-19-g7ec4401a095  b4cb326006c0  83775e5cd19f
2026-04-15T13:49:45.577 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd  vm06  *:8000  running (8m)   8m ago     14m  92.5M    -        20.2.0-19-g7ec4401a095  b4cb326006c0  57a0cf0484de
2026-04-15T13:49:45.577 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg  vm09  *:8000  running (2m)   2m ago     14m  94.6M    -        20.2.0-19-g7ec4401a095  b4cb326006c0  fa1d79d02111
2026-04-15T13:49:45.577 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu  vm09  *:8001  error          2m ago     14m  -        -
2026-04-15T13:49:45.824 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:49:45.824 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:49:45.824 INFO:teuthology.orchestra.run.vm06.stdout:    daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:49:45.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:45 vm09 bash[34466]: audit 2026-04-15T13:49:45.360172+0000 mgr.vm06.qbbldl (mgr.14229) 915 : audit [DBG] from='client.16610 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:45.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:45 vm09 bash[34466]: cluster 2026-04-15T13:49:45.419557+0000 mgr.vm06.qbbldl (mgr.14229) 916 : cluster [DBG] pgmap v512: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:49:46.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:45 vm06 bash[28114]: audit 2026-04-15T13:49:45.360172+0000 mgr.vm06.qbbldl (mgr.14229) 915 : audit [DBG] from='client.16610 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:46.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:45 vm06 bash[28114]: cluster 2026-04-15T13:49:45.419557+0000 mgr.vm06.qbbldl (mgr.14229) 916 : cluster [DBG] pgmap v512: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:49:46.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:46 vm09 bash[34466]: audit 2026-04-15T13:49:45.569643+0000 mgr.vm06.qbbldl (mgr.14229) 917 : audit [DBG] from='client.16614 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:46.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:46 vm09 bash[34466]: audit 2026-04-15T13:49:45.820590+0000 mon.vm06 (mon.0) 1287 : audit [DBG] from='client.? 192.168.123.106:0/830113153' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:49:47.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:46 vm06 bash[28114]: audit 2026-04-15T13:49:45.569643+0000 mgr.vm06.qbbldl (mgr.14229) 917 : audit [DBG] from='client.16614 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:47.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:46 vm06 bash[28114]: audit 2026-04-15T13:49:45.820590+0000 mon.vm06 (mon.0) 1287 : audit [DBG] from='client.? 192.168.123.106:0/830113153' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:49:47.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:47 vm09 bash[34466]: cluster 2026-04-15T13:49:47.419975+0000 mgr.vm06.qbbldl (mgr.14229) 918 : cluster [DBG] pgmap v513: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:49:48.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:47 vm06 bash[28114]: cluster 2026-04-15T13:49:47.419975+0000 mgr.vm06.qbbldl (mgr.14229) 918 : cluster [DBG] pgmap v513: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:49:49.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:49 vm09 bash[34466]: cluster 2026-04-15T13:49:49.420422+0000 mgr.vm06.qbbldl (mgr.14229) 919 : cluster [DBG] pgmap v514: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:49:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr
15 13:49:49 vm06 bash[28114]: cluster 2026-04-15T13:49:49.420422+0000 mgr.vm06.qbbldl (mgr.14229) 919 : cluster [DBG] pgmap v514: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:49:50.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:49 vm06 bash[28114]: cluster 2026-04-15T13:49:49.420422+0000 mgr.vm06.qbbldl (mgr.14229) 919 : cluster [DBG] pgmap v514: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:49:51.058 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop 2026-04-15T13:49:51.278 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:49:51.278 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (13m) 8m ago 14m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:49:51.278 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (8m) 8m ago 14m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:49:51.278 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 2m ago 14m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111 2026-04-15T13:49:51.278 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 2m ago 14m - - 2026-04-15T13:49:51.537 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:49:51.537 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:49:51.537 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state 2026-04-15T13:49:51.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:51 vm09 bash[34466]: audit 2026-04-15T13:49:51.037341+0000 mgr.vm06.qbbldl (mgr.14229) 920 : audit [DBG] from='client.16622 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:49:51.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:51 vm09 bash[34466]: audit 2026-04-15T13:49:51.037341+0000 mgr.vm06.qbbldl (mgr.14229) 920 : audit [DBG] from='client.16622 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:49:51.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:51 vm09 bash[34466]: audit 2026-04-15T13:49:51.271399+0000 mgr.vm06.qbbldl (mgr.14229) 921 : audit [DBG] from='client.16626 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:49:51.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:51 vm09 bash[34466]: audit 2026-04-15T13:49:51.271399+0000 mgr.vm06.qbbldl (mgr.14229) 921 : audit [DBG] from='client.16626 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:49:51.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:51 vm09 bash[34466]: cluster 2026-04-15T13:49:51.421042+0000 mgr.vm06.qbbldl (mgr.14229) 922 : cluster [DBG] pgmap v515: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:49:51.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:51 vm09 bash[34466]: cluster 2026-04-15T13:49:51.421042+0000 mgr.vm06.qbbldl (mgr.14229) 922 : cluster [DBG] pgmap v515: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB 
2026-04-15T13:49:52.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:51 vm06 bash[28114]: audit 2026-04-15T13:49:51.037341+0000 mgr.vm06.qbbldl (mgr.14229) 920 : audit [DBG] from='client.16622 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:52.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:51 vm06 bash[28114]: audit 2026-04-15T13:49:51.271399+0000 mgr.vm06.qbbldl (mgr.14229) 921 : audit [DBG] from='client.16626 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:52.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:51 vm06 bash[28114]: cluster 2026-04-15T13:49:51.421042+0000 mgr.vm06.qbbldl (mgr.14229) 922 : cluster [DBG] pgmap v515: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:49:52.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:52 vm09 bash[34466]: audit 2026-04-15T13:49:51.533441+0000 mon.vm06 (mon.0) 1288 : audit [DBG] from='client.? 192.168.123.106:0/3461369612' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:49:53.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:52 vm06 bash[28114]: audit 2026-04-15T13:49:51.533441+0000 mon.vm06 (mon.0) 1288 : audit [DBG] from='client.? 192.168.123.106:0/3461369612' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:49:53.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:53 vm09 bash[34466]: cluster 2026-04-15T13:49:53.421501+0000 mgr.vm06.qbbldl (mgr.14229) 923 : cluster [DBG] pgmap v516: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:49:53.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:53 vm09 bash[34466]: audit 2026-04-15T13:49:53.495014+0000 mon.vm06 (mon.0) 1289 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:49:54.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:53 vm06 bash[28114]: cluster 2026-04-15T13:49:53.421501+0000 mgr.vm06.qbbldl (mgr.14229) 923 : cluster [DBG] pgmap v516: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:49:54.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:53 vm06 bash[28114]: audit 2026-04-15T13:49:53.495014+0000 mon.vm06 (mon.0) 1289 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:49:56.757 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:49:56.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:56 vm06 bash[28114]: cluster 2026-04-15T13:49:55.421955+0000 mgr.vm06.qbbldl (mgr.14229) 924 : cluster [DBG] pgmap v517: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:49:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:56 vm09 bash[34466]: cluster 2026-04-15T13:49:55.421955+0000 mgr.vm06.qbbldl (mgr.14229) 924 : cluster [DBG] pgmap v517: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:49:56.954 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:49:56.954 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (13m) 8m ago 14m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:49:56.954 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (8m) 8m ago 14m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:49:56.954 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 2m ago 14m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:49:56.954 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 2m ago 14m - -
2026-04-15T13:49:57.203 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:49:57.203 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:49:57.203 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:49:57.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:57 vm06 bash[28114]: audit 2026-04-15T13:49:56.735322+0000 mgr.vm06.qbbldl (mgr.14229) 925 : audit [DBG] from='client.16634 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:57.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:57 vm06 bash[28114]: audit 2026-04-15T13:49:56.946855+0000 mgr.vm06.qbbldl (mgr.14229) 926 : audit [DBG] from='client.16638 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:57.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:57 vm06 bash[28114]: audit 2026-04-15T13:49:57.199081+0000 mon.vm06 (mon.0) 1290 : audit [DBG] from='client.? 192.168.123.106:0/2237525240' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:49:57.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:57 vm06 bash[28114]: cluster 2026-04-15T13:49:57.422416+0000 mgr.vm06.qbbldl (mgr.14229) 927 : cluster [DBG] pgmap v518: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:49:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:57 vm09 bash[34466]: audit 2026-04-15T13:49:56.735322+0000 mgr.vm06.qbbldl (mgr.14229) 925 : audit [DBG] from='client.16634 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:57 vm09 bash[34466]: audit 2026-04-15T13:49:56.946855+0000 mgr.vm06.qbbldl (mgr.14229) 926 : audit [DBG] from='client.16638 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:49:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:57 vm09 bash[34466]: audit 2026-04-15T13:49:57.199081+0000 mon.vm06 (mon.0) 1290 : audit [DBG] from='client.? 192.168.123.106:0/2237525240' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:49:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:57 vm09 bash[34466]: cluster 2026-04-15T13:49:57.422416+0000 mgr.vm06.qbbldl (mgr.14229) 927 : cluster [DBG] pgmap v518: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:49:59.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:49:59 vm09 bash[34466]: cluster 2026-04-15T13:49:59.422935+0000 mgr.vm06.qbbldl (mgr.14229) 928 : cluster [DBG] pgmap v519: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:50:00.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:49:59 vm06 bash[28114]: cluster 2026-04-15T13:49:59.422935+0000 mgr.vm06.qbbldl (mgr.14229) 928 : cluster [DBG] pgmap v519: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:50:00.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:00 vm09 bash[34466]: cluster 2026-04-15T13:50:00.000171+0000 mon.vm06 (mon.0) 1291 : cluster [WRN] overall HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:50:01.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:00 vm06 bash[28114]: cluster 2026-04-15T13:50:00.000171+0000 mon.vm06 (mon.0) 1291 : cluster [WRN] overall HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:50:01.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:01 vm09 bash[34466]: cluster 2026-04-15T13:50:01.423403+0000 mgr.vm06.qbbldl (mgr.14229) 929 : cluster [DBG] pgmap v520: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:50:02.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:01 vm06 bash[28114]: cluster 2026-04-15T13:50:01.423403+0000 mgr.vm06.qbbldl (mgr.14229) 929 : cluster [DBG] pgmap v520: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:50:02.417 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:50:02.634 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:50:02.634 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (13m) 8m ago 14m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:50:02.634 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (8m) 8m ago 14m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:50:02.634 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 2m ago 14m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:50:02.634 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 2m ago 14m - -
2026-04-15T13:50:02.890 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:50:02.891 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:50:02.891 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:50:03.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:02 vm06 bash[28114]: audit 2026-04-15T13:50:02.393759+0000 mgr.vm06.qbbldl (mgr.14229) 930 : audit [DBG] from='client.25737 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:03.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:02 vm09 bash[34466]: audit 2026-04-15T13:50:02.393759+0000 mgr.vm06.qbbldl (mgr.14229) 930 : audit [DBG] from='client.25737 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:04.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:03 vm06 bash[28114]: audit 2026-04-15T13:50:02.627371+0000 mgr.vm06.qbbldl (mgr.14229) 931 : audit [DBG] from='client.16650 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:50:04.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:03 vm06 bash[28114]: audit 2026-04-15T13:50:02.889527+0000 mon.vm09 (mon.1) 36 : audit [DBG] from='client.? 192.168.123.106:0/632776954' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:50:04.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:03 vm06 bash[28114]: audit 2026-04-15T13:50:02.889527+0000 mon.vm09 (mon.1) 36 : audit [DBG] from='client.? 192.168.123.106:0/632776954' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:50:04.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:03 vm06 bash[28114]: cluster 2026-04-15T13:50:03.423912+0000 mgr.vm06.qbbldl (mgr.14229) 932 : cluster [DBG] pgmap v521: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:50:04.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:03 vm06 bash[28114]: cluster 2026-04-15T13:50:03.423912+0000 mgr.vm06.qbbldl (mgr.14229) 932 : cluster [DBG] pgmap v521: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:50:04.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:03 vm09 bash[34466]: audit 2026-04-15T13:50:02.627371+0000 mgr.vm06.qbbldl (mgr.14229) 931 : audit [DBG] from='client.16650 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:50:04.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:03 vm09 bash[34466]: audit 2026-04-15T13:50:02.627371+0000 mgr.vm06.qbbldl (mgr.14229) 931 : audit [DBG] from='client.16650 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:50:04.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:03 vm09 bash[34466]: audit 2026-04-15T13:50:02.889527+0000 mon.vm09 (mon.1) 36 : audit [DBG] from='client.? 192.168.123.106:0/632776954' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:50:04.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:03 vm09 bash[34466]: audit 2026-04-15T13:50:02.889527+0000 mon.vm09 (mon.1) 36 : audit [DBG] from='client.? 
192.168.123.106:0/632776954' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:50:04.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:03 vm09 bash[34466]: cluster 2026-04-15T13:50:03.423912+0000 mgr.vm06.qbbldl (mgr.14229) 932 : cluster [DBG] pgmap v521: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:50:04.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:03 vm09 bash[34466]: cluster 2026-04-15T13:50:03.423912+0000 mgr.vm06.qbbldl (mgr.14229) 932 : cluster [DBG] pgmap v521: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:50:05.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:05 vm09 bash[34466]: cluster 2026-04-15T13:50:05.424430+0000 mgr.vm06.qbbldl (mgr.14229) 933 : cluster [DBG] pgmap v522: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:50:05.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:05 vm09 bash[34466]: cluster 2026-04-15T13:50:05.424430+0000 mgr.vm06.qbbldl (mgr.14229) 933 : cluster [DBG] pgmap v522: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:50:06.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:05 vm06 bash[28114]: cluster 2026-04-15T13:50:05.424430+0000 mgr.vm06.qbbldl (mgr.14229) 933 : cluster [DBG] pgmap v522: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:50:06.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:05 vm06 bash[28114]: cluster 2026-04-15T13:50:05.424430+0000 mgr.vm06.qbbldl (mgr.14229) 933 : cluster [DBG] pgmap v522: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:50:08.128 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop 2026-04-15T13:50:08.317 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:50:08.317 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (13m) 8m ago 14m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:50:08.317 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (8m) 8m ago 14m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:50:08.317 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 2m ago 14m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111 2026-04-15T13:50:08.317 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 2m ago 14m - - 2026-04-15T13:50:08.575 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:50:08.575 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:50:08.576 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state 2026-04-15T13:50:08.887 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:08 vm06 bash[28114]: cluster 2026-04-15T13:50:07.424995+0000 mgr.vm06.qbbldl (mgr.14229) 934 : cluster [DBG] pgmap v523: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:50:08.887 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:08 vm06 bash[28114]: cluster 2026-04-15T13:50:07.424995+0000 mgr.vm06.qbbldl (mgr.14229) 934 : cluster [DBG] pgmap v523: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:50:09.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:08 vm09 bash[34466]: cluster 2026-04-15T13:50:07.424995+0000 mgr.vm06.qbbldl (mgr.14229) 934 : cluster [DBG] pgmap v523: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:50:09.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:08 vm09 bash[34466]: cluster 2026-04-15T13:50:07.424995+0000 mgr.vm06.qbbldl (mgr.14229) 934 : cluster [DBG] pgmap v523: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:50:10.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:09 vm06 bash[28114]: audit 2026-04-15T13:50:08.101677+0000 mgr.vm06.qbbldl (mgr.14229) 935 : audit [DBG] from='client.16658 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:50:10.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:09 vm06 bash[28114]: audit 2026-04-15T13:50:08.101677+0000 mgr.vm06.qbbldl (mgr.14229) 935 : audit [DBG] from='client.16658 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:50:10.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:09 vm06 bash[28114]: audit 2026-04-15T13:50:08.310499+0000 mgr.vm06.qbbldl (mgr.14229) 936 : audit [DBG] from='client.16662 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:50:10.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:09 vm06 bash[28114]: audit 2026-04-15T13:50:08.310499+0000 mgr.vm06.qbbldl (mgr.14229) 936 : audit [DBG] from='client.16662 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:50:10.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:09 vm06 bash[28114]: audit 2026-04-15T13:50:08.571424+0000 mon.vm06 (mon.0) 1292 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:50:10.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:09 vm06 bash[28114]: audit 2026-04-15T13:50:08.571424+0000 mon.vm06 (mon.0) 1292 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:50:10.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:09 vm06 bash[28114]: audit 2026-04-15T13:50:08.571614+0000 mon.vm06 (mon.0) 1293 : audit [DBG] from='client.? 192.168.123.106:0/1760404536' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:50:10.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:09 vm06 bash[28114]: audit 2026-04-15T13:50:08.571614+0000 mon.vm06 (mon.0) 1293 : audit [DBG] from='client.? 
2026-04-15T13:50:10.017 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:09 vm06 bash[28114]: cluster 2026-04-15T13:50:09.425544+0000 mgr.vm06.qbbldl (mgr.14229) 937 : cluster [DBG] pgmap v524: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:50:10.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:09 vm09 bash[34466]: audit 2026-04-15T13:50:08.101677+0000 mgr.vm06.qbbldl (mgr.14229) 935 : audit [DBG] from='client.16658 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:10.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:09 vm09 bash[34466]: audit 2026-04-15T13:50:08.310499+0000 mgr.vm06.qbbldl (mgr.14229) 936 : audit [DBG] from='client.16662 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:10.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:09 vm09 bash[34466]: audit 2026-04-15T13:50:08.571424+0000 mon.vm06 (mon.0) 1292 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:50:10.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:09 vm09 bash[34466]: audit 2026-04-15T13:50:08.571614+0000 mon.vm06 (mon.0) 1293 : audit [DBG] from='client.? 192.168.123.106:0/1760404536' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:50:10.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:09 vm09 bash[34466]: cluster 2026-04-15T13:50:09.425544+0000 mgr.vm06.qbbldl (mgr.14229) 937 : cluster [DBG] pgmap v524: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:50:11.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:11 vm06 bash[28114]: cluster 2026-04-15T13:50:11.426176+0000 mgr.vm06.qbbldl (mgr.14229) 938 : cluster [DBG] pgmap v525: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:50:11.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:11 vm06 bash[28114]: audit 2026-04-15T13:50:11.439710+0000 mon.vm06 (mon.0) 1294 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:50:11.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:11 vm09 bash[34466]: cluster 2026-04-15T13:50:11.426176+0000 mgr.vm06.qbbldl (mgr.14229) 938 : cluster [DBG] pgmap v525: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:50:11.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:11 vm09 bash[34466]: audit 2026-04-15T13:50:11.439710+0000 mon.vm06 (mon.0) 1294 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:50:12.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:12 vm06 bash[28114]: audit 2026-04-15T13:50:11.825067+0000 mon.vm06 (mon.0) 1295 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:50:12.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:12 vm06 bash[28114]: audit 2026-04-15T13:50:11.825593+0000 mon.vm06 (mon.0) 1296 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:50:12.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:12 vm06 bash[28114]: cluster 2026-04-15T13:50:11.826852+0000 mgr.vm06.qbbldl (mgr.14229) 939 : cluster [DBG] pgmap v526: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 196 B/s wr, 0 op/s
2026-04-15T13:50:12.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:12 vm06 bash[28114]: audit 2026-04-15T13:50:11.831379+0000 mon.vm06 (mon.0) 1297 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:50:12.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:12 vm06 bash[28114]: audit 2026-04-15T13:50:11.832819+0000 mon.vm06 (mon.0) 1298 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:50:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:12 vm09 bash[34466]: audit 2026-04-15T13:50:11.825067+0000 mon.vm06 (mon.0) 1295 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:50:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:12 vm09 bash[34466]: audit 2026-04-15T13:50:11.825593+0000 mon.vm06 (mon.0) 1296 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
"client.admin"} : dispatch 2026-04-15T13:50:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:12 vm09 bash[34466]: audit 2026-04-15T13:50:11.825593+0000 mon.vm06 (mon.0) 1296 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:50:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:12 vm09 bash[34466]: cluster 2026-04-15T13:50:11.826852+0000 mgr.vm06.qbbldl (mgr.14229) 939 : cluster [DBG] pgmap v526: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 196 B/s wr, 0 op/s 2026-04-15T13:50:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:12 vm09 bash[34466]: cluster 2026-04-15T13:50:11.826852+0000 mgr.vm06.qbbldl (mgr.14229) 939 : cluster [DBG] pgmap v526: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 196 B/s wr, 0 op/s 2026-04-15T13:50:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:12 vm09 bash[34466]: audit 2026-04-15T13:50:11.831379+0000 mon.vm06 (mon.0) 1297 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:50:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:12 vm09 bash[34466]: audit 2026-04-15T13:50:11.831379+0000 mon.vm06 (mon.0) 1297 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:50:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:12 vm09 bash[34466]: audit 2026-04-15T13:50:11.832819+0000 mon.vm06 (mon.0) 1298 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:50:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:12 vm09 bash[34466]: audit 2026-04-15T13:50:11.832819+0000 mon.vm06 (mon.0) 1298 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:50:13.815 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop 2026-04-15T13:50:14.018 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:50:14.018 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (13m) 8m ago 14m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:50:14.018 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (8m) 8m ago 14m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:50:14.018 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 3m ago 14m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111 2026-04-15T13:50:14.018 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 3m ago 14m - - 2026-04-15T13:50:14.261 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:50:14.261 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:50:14.261 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state 2026-04-15T13:50:15.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:14 vm06 bash[28114]: audit 2026-04-15T13:50:13.793162+0000 mgr.vm06.qbbldl (mgr.14229) 940 : audit [DBG] 
from='client.16670 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:50:15.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:14 vm06 bash[28114]: audit 2026-04-15T13:50:13.793162+0000 mgr.vm06.qbbldl (mgr.14229) 940 : audit [DBG] from='client.16670 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:50:15.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:14 vm06 bash[28114]: cluster 2026-04-15T13:50:13.827410+0000 mgr.vm06.qbbldl (mgr.14229) 941 : cluster [DBG] pgmap v527: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 196 B/s wr, 0 op/s 2026-04-15T13:50:15.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:14 vm06 bash[28114]: cluster 2026-04-15T13:50:13.827410+0000 mgr.vm06.qbbldl (mgr.14229) 941 : cluster [DBG] pgmap v527: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 196 B/s wr, 0 op/s 2026-04-15T13:50:15.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:14 vm06 bash[28114]: audit 2026-04-15T13:50:14.010626+0000 mgr.vm06.qbbldl (mgr.14229) 942 : audit [DBG] from='client.16674 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:50:15.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:14 vm06 bash[28114]: audit 2026-04-15T13:50:14.010626+0000 mgr.vm06.qbbldl (mgr.14229) 942 : audit [DBG] from='client.16674 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:50:15.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:14 vm06 bash[28114]: audit 2026-04-15T13:50:14.257901+0000 mon.vm06 (mon.0) 1299 : audit [DBG] from='client.? 192.168.123.106:0/313732113' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:50:15.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:14 vm06 bash[28114]: audit 2026-04-15T13:50:14.257901+0000 mon.vm06 (mon.0) 1299 : audit [DBG] from='client.? 
192.168.123.106:0/313732113' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:50:15.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:14 vm09 bash[34466]: audit 2026-04-15T13:50:13.793162+0000 mgr.vm06.qbbldl (mgr.14229) 940 : audit [DBG] from='client.16670 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:50:15.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:14 vm09 bash[34466]: audit 2026-04-15T13:50:13.793162+0000 mgr.vm06.qbbldl (mgr.14229) 940 : audit [DBG] from='client.16670 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:50:15.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:14 vm09 bash[34466]: cluster 2026-04-15T13:50:13.827410+0000 mgr.vm06.qbbldl (mgr.14229) 941 : cluster [DBG] pgmap v527: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 196 B/s wr, 0 op/s 2026-04-15T13:50:15.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:14 vm09 bash[34466]: cluster 2026-04-15T13:50:13.827410+0000 mgr.vm06.qbbldl (mgr.14229) 941 : cluster [DBG] pgmap v527: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 196 B/s wr, 0 op/s 2026-04-15T13:50:15.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:14 vm09 bash[34466]: audit 2026-04-15T13:50:14.010626+0000 mgr.vm06.qbbldl (mgr.14229) 942 : audit [DBG] from='client.16674 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:50:15.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:14 vm09 bash[34466]: audit 2026-04-15T13:50:14.010626+0000 mgr.vm06.qbbldl (mgr.14229) 942 : audit [DBG] from='client.16674 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:50:15.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:14 vm09 bash[34466]: audit 2026-04-15T13:50:14.257901+0000 mon.vm06 (mon.0) 1299 : audit [DBG] from='client.? 192.168.123.106:0/313732113' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:50:15.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:14 vm09 bash[34466]: audit 2026-04-15T13:50:14.257901+0000 mon.vm06 (mon.0) 1299 : audit [DBG] from='client.? 
192.168.123.106:0/313732113' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:50:17.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:16 vm06 bash[28114]: cluster 2026-04-15T13:50:15.827959+0000 mgr.vm06.qbbldl (mgr.14229) 943 : cluster [DBG] pgmap v528: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 196 B/s wr, 0 op/s 2026-04-15T13:50:17.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:16 vm06 bash[28114]: cluster 2026-04-15T13:50:15.827959+0000 mgr.vm06.qbbldl (mgr.14229) 943 : cluster [DBG] pgmap v528: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 196 B/s wr, 0 op/s 2026-04-15T13:50:17.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:16 vm09 bash[34466]: cluster 2026-04-15T13:50:15.827959+0000 mgr.vm06.qbbldl (mgr.14229) 943 : cluster [DBG] pgmap v528: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 196 B/s wr, 0 op/s 2026-04-15T13:50:17.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:16 vm09 bash[34466]: cluster 2026-04-15T13:50:15.827959+0000 mgr.vm06.qbbldl (mgr.14229) 943 : cluster [DBG] pgmap v528: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 196 B/s wr, 0 op/s 2026-04-15T13:50:19.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:18 vm06 bash[28114]: cluster 2026-04-15T13:50:17.828365+0000 mgr.vm06.qbbldl (mgr.14229) 944 : cluster [DBG] pgmap v529: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 196 B/s wr, 0 op/s 2026-04-15T13:50:19.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:18 vm06 bash[28114]: cluster 2026-04-15T13:50:17.828365+0000 mgr.vm06.qbbldl (mgr.14229) 944 : cluster [DBG] pgmap v529: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 196 B/s wr, 0 op/s 2026-04-15T13:50:19.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:18 vm09 bash[34466]: cluster 2026-04-15T13:50:17.828365+0000 mgr.vm06.qbbldl (mgr.14229) 944 : cluster [DBG] pgmap v529: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 196 B/s wr, 0 op/s 2026-04-15T13:50:19.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:18 vm09 bash[34466]: cluster 2026-04-15T13:50:17.828365+0000 mgr.vm06.qbbldl (mgr.14229) 944 : cluster [DBG] pgmap v529: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 196 B/s wr, 0 op/s 2026-04-15T13:50:19.477 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop 2026-04-15T13:50:19.667 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:50:19.667 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (14m) 8m ago 14m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:50:19.668 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (8m) 8m ago 14m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:50:19.668 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 3m ago 14m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111 2026-04-15T13:50:19.668 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 3m ago 14m - - 2026-04-15T13:50:19.913 
2026-04-15T13:50:19.913 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:50:19.913 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:50:20.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:19 vm06 bash[28114]: audit 2026-04-15T13:50:19.454471+0000 mgr.vm06.qbbldl (mgr.14229) 945 : audit [DBG] from='client.16682 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:20.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:19 vm09 bash[34466]: audit 2026-04-15T13:50:19.454471+0000 mgr.vm06.qbbldl (mgr.14229) 945 : audit [DBG] from='client.16682 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:21.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:20 vm06 bash[28114]: audit 2026-04-15T13:50:19.660609+0000 mgr.vm06.qbbldl (mgr.14229) 946 : audit [DBG] from='client.16686 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:21.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:20 vm06 bash[28114]: cluster 2026-04-15T13:50:19.828823+0000 mgr.vm06.qbbldl (mgr.14229) 947 : cluster [DBG] pgmap v530: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:50:21.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:20 vm06 bash[28114]: audit 2026-04-15T13:50:19.909652+0000 mon.vm06 (mon.0) 1300 : audit [DBG] from='client.? 192.168.123.106:0/1059811434' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:50:21.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:20 vm09 bash[34466]: audit 2026-04-15T13:50:19.660609+0000 mgr.vm06.qbbldl (mgr.14229) 946 : audit [DBG] from='client.16686 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:21.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:20 vm09 bash[34466]: cluster 2026-04-15T13:50:19.828823+0000 mgr.vm06.qbbldl (mgr.14229) 947 : cluster [DBG] pgmap v530: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:50:21.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:20 vm09 bash[34466]: audit 2026-04-15T13:50:19.909652+0000 mon.vm06 (mon.0) 1300 : audit [DBG] from='client.? 192.168.123.106:0/1059811434' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:50:23.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:22 vm06 bash[28114]: cluster 2026-04-15T13:50:21.829362+0000 mgr.vm06.qbbldl (mgr.14229) 948 : cluster [DBG] pgmap v531: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:50:23.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:22 vm09 bash[34466]: cluster 2026-04-15T13:50:21.829362+0000 mgr.vm06.qbbldl (mgr.14229) 948 : cluster [DBG] pgmap v531: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:50:24.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:23 vm06 bash[28114]: audit 2026-04-15T13:50:23.496031+0000 mon.vm06 (mon.0) 1301 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:50:24.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:23 vm09 bash[34466]: audit 2026-04-15T13:50:23.496031+0000 mon.vm06 (mon.0) 1301 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:50:25.139 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:50:25.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:24 vm06 bash[28114]: cluster 2026-04-15T13:50:23.829868+0000 mgr.vm06.qbbldl (mgr.14229) 949 : cluster [DBG] pgmap v532: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:50:25.321 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:50:25.321 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (14m) 8m ago 14m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:50:25.321 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (8m) 8m ago 14m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:50:25.321 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 3m ago 14m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:50:25.321 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 3m ago 14m - -
2026-04-15T13:50:25.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:24 vm09 bash[34466]: cluster 2026-04-15T13:50:23.829868+0000 mgr.vm06.qbbldl (mgr.14229) 949 : cluster [DBG] pgmap v532: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:50:25.554 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:50:25.554 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:50:25.554 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:50:26.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:25 vm06 bash[28114]: audit 2026-04-15T13:50:25.116867+0000 mgr.vm06.qbbldl (mgr.14229) 950 : audit [DBG] from='client.16694 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:26.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:25 vm06 bash[28114]: audit 2026-04-15T13:50:25.314892+0000 mgr.vm06.qbbldl (mgr.14229) 951 : audit [DBG] from='client.16698 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:26.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:25 vm06 bash[28114]: audit 2026-04-15T13:50:25.550724+0000 mon.vm06 (mon.0) 1302 : audit [DBG] from='client.? 192.168.123.106:0/1432021538' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:50:26.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:25 vm09 bash[34466]: audit 2026-04-15T13:50:25.116867+0000 mgr.vm06.qbbldl (mgr.14229) 950 : audit [DBG] from='client.16694 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:26.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:25 vm09 bash[34466]: audit 2026-04-15T13:50:25.314892+0000 mgr.vm06.qbbldl (mgr.14229) 951 : audit [DBG] from='client.16698 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:26.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:25 vm09 bash[34466]: audit 2026-04-15T13:50:25.550724+0000 mon.vm06 (mon.0) 1302 : audit [DBG] from='client.? 192.168.123.106:0/1432021538' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:50:27.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:26 vm06 bash[28114]: cluster 2026-04-15T13:50:25.830437+0000 mgr.vm06.qbbldl (mgr.14229) 952 : cluster [DBG] pgmap v533: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:50:27.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:26 vm09 bash[34466]: cluster 2026-04-15T13:50:25.830437+0000 mgr.vm06.qbbldl (mgr.14229) 952 : cluster [DBG] pgmap v533: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:50:29.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:28 vm06 bash[28114]: cluster 2026-04-15T13:50:27.830887+0000 mgr.vm06.qbbldl (mgr.14229) 953 : cluster [DBG] pgmap v534: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:50:29.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:28 vm09 bash[34466]: cluster 2026-04-15T13:50:27.830887+0000 mgr.vm06.qbbldl (mgr.14229) 953 : cluster [DBG] pgmap v534: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:50:30.782 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:50:30.987 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:50:30.987 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (14m) 8m ago 15m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:50:30.987 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (8m) 8m ago 15m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:50:30.987 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 3m ago 15m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:50:30.987 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 3m ago 15m - -
2026-04-15T13:50:31.236 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:50:31.236 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:50:31.236 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:50:31.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:30 vm06 bash[28114]: cluster 2026-04-15T13:50:29.831448+0000 mgr.vm06.qbbldl (mgr.14229) 954 : cluster [DBG] pgmap v535: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:50:31.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:30 vm09 bash[34466]: cluster 2026-04-15T13:50:29.831448+0000 mgr.vm06.qbbldl (mgr.14229) 954 : cluster [DBG] pgmap v535: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:50:32.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:31 vm06 bash[28114]: audit 2026-04-15T13:50:30.755763+0000 mgr.vm06.qbbldl (mgr.14229) 955 : audit [DBG] from='client.16706 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:32.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:31 vm06 bash[28114]: audit 2026-04-15T13:50:30.978916+0000 mgr.vm06.qbbldl (mgr.14229) 956 : audit [DBG] from='client.16710 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:32.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:31 vm06 bash[28114]: audit 2026-04-15T13:50:31.232615+0000 mon.vm06 (mon.0) 1303 : audit [DBG] from='client.? 192.168.123.106:0/331496454' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:50:32.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:31 vm09 bash[34466]: audit 2026-04-15T13:50:30.755763+0000 mgr.vm06.qbbldl (mgr.14229) 955 : audit [DBG] from='client.16706 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:32.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:31 vm09 bash[34466]: audit 2026-04-15T13:50:30.978916+0000 mgr.vm06.qbbldl (mgr.14229) 956 : audit [DBG] from='client.16710 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:32.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:31 vm09 bash[34466]: audit 2026-04-15T13:50:31.232615+0000 mon.vm06 (mon.0) 1303 : audit [DBG] from='client.? 192.168.123.106:0/331496454' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:50:33.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:32 vm06 bash[28114]: cluster 2026-04-15T13:50:31.832057+0000 mgr.vm06.qbbldl (mgr.14229) 957 : cluster [DBG] pgmap v536: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:50:33.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:32 vm09 bash[34466]: cluster 2026-04-15T13:50:31.832057+0000 mgr.vm06.qbbldl (mgr.14229) 957 : cluster [DBG] pgmap v536: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:50:35.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:34 vm06 bash[28114]: cluster 2026-04-15T13:50:33.832552+0000 mgr.vm06.qbbldl (mgr.14229) 958 : cluster [DBG] pgmap v537: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:50:35.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:34 vm09 bash[34466]: cluster 2026-04-15T13:50:33.832552+0000 mgr.vm06.qbbldl (mgr.14229) 958 : cluster [DBG] pgmap v537: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:50:36.457 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:50:36.670 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:50:36.670 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (14m) 8m ago 15m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:50:36.670 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (8m) 8m ago 15m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:50:36.670 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 3m ago 15m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:50:36.670 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 3m ago 15m - -
2026-04-15T13:50:36.930 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:50:36.930 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:50:36.930 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:50:37.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:36 vm06 bash[28114]: cluster 2026-04-15T13:50:35.833166+0000 mgr.vm06.qbbldl (mgr.14229) 959 : cluster [DBG] pgmap v538: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:50:37.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:36 vm06 bash[28114]: audit 2026-04-15T13:50:36.434067+0000 mgr.vm06.qbbldl (mgr.14229) 960 : audit [DBG] from='client.16718 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:37.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:36 vm06 bash[28114]: audit 2026-04-15T13:50:36.926275+0000 mon.vm06 (mon.0) 1304 : audit [DBG] from='client.? 192.168.123.106:0/70759176' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:50:37.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:36 vm09 bash[34466]: cluster 2026-04-15T13:50:35.833166+0000 mgr.vm06.qbbldl (mgr.14229) 959 : cluster [DBG] pgmap v538: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:50:37.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:36 vm09 bash[34466]: audit 2026-04-15T13:50:36.434067+0000 mgr.vm06.qbbldl (mgr.14229) 960 : audit [DBG] from='client.16718 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:37.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:36 vm09 bash[34466]: audit 2026-04-15T13:50:36.926275+0000 mon.vm06 (mon.0) 1304 : audit [DBG] from='client.? 192.168.123.106:0/70759176' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:50:38.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:37 vm06 bash[28114]: audit 2026-04-15T13:50:36.663578+0000 mgr.vm06.qbbldl (mgr.14229) 961 : audit [DBG] from='client.16722 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:38.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:37 vm09 bash[34466]: audit 2026-04-15T13:50:36.663578+0000 mgr.vm06.qbbldl (mgr.14229) 961 : audit [DBG] from='client.16722 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:39.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:38 vm06 bash[28114]: cluster 2026-04-15T13:50:37.834824+0000 mgr.vm06.qbbldl (mgr.14229) 962 : cluster [DBG] pgmap v539: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:50:39.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:38 vm06 bash[28114]: audit 2026-04-15T13:50:38.495852+0000 mon.vm06 (mon.0) 1305 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:50:39.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:38 vm09 bash[34466]: cluster 2026-04-15T13:50:37.834824+0000 mgr.vm06.qbbldl (mgr.14229) 962 : cluster [DBG] pgmap v539: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:50:39.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:38 vm09 bash[34466]: audit 2026-04-15T13:50:38.495852+0000 mon.vm06 (mon.0) 1305 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:50:41.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:41 vm06 bash[28114]: cluster 2026-04-15T13:50:39.835251+0000 mgr.vm06.qbbldl (mgr.14229) 963 : cluster [DBG] pgmap v540: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:50:41.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:41 vm09 bash[34466]: cluster 2026-04-15T13:50:39.835251+0000 mgr.vm06.qbbldl (mgr.14229) 963 : cluster [DBG] pgmap v540: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:50:42.158 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:50:42.359 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:50:42.359 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (14m) 8m ago 15m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:50:42.359 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (9m) 8m ago 15m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:50:42.359 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 3m ago 15m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:50:42.359 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 3m ago 15m - -
2026-04-15T13:50:42.624 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:50:42.624 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:50:42.624 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:50:43.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:43 vm09 bash[34466]: cluster 2026-04-15T13:50:41.835770+0000 mgr.vm06.qbbldl (mgr.14229) 964 : cluster [DBG] pgmap v541: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:50:43.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:43 vm09 bash[34466]: audit 2026-04-15T13:50:42.133223+0000 mgr.vm06.qbbldl (mgr.14229) 965 : audit [DBG] from='client.16730 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:43.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:43 vm09 bash[34466]: audit 2026-04-15T13:50:42.352861+0000 mgr.vm06.qbbldl (mgr.14229) 966 : audit [DBG] from='client.16734 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:43.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:43 vm09 bash[34466]: audit 2026-04-15T13:50:42.620980+0000 mon.vm06 (mon.0) 1306 : audit [DBG] from='client.? 192.168.123.106:0/2151753015' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:50:43.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:43 vm06 bash[28114]: cluster 2026-04-15T13:50:41.835770+0000 mgr.vm06.qbbldl (mgr.14229) 964 : cluster [DBG] pgmap v541: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:50:43.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:43 vm06 bash[28114]: audit 2026-04-15T13:50:42.133223+0000 mgr.vm06.qbbldl (mgr.14229) 965 : audit [DBG] from='client.16730 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:43.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:43 vm06 bash[28114]: audit 2026-04-15T13:50:42.352861+0000 mgr.vm06.qbbldl (mgr.14229) 966 : audit [DBG] from='client.16734 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:43.517 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:43 vm06 bash[28114]: audit 2026-04-15T13:50:42.620980+0000 mon.vm06 (mon.0) 1306 : audit [DBG] from='client.? 192.168.123.106:0/2151753015' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:50:45.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:45 vm09 bash[34466]: cluster 2026-04-15T13:50:43.836328+0000 mgr.vm06.qbbldl (mgr.14229) 967 : cluster [DBG] pgmap v542: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:50:45.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:45 vm06 bash[28114]: cluster 2026-04-15T13:50:43.836328+0000 mgr.vm06.qbbldl (mgr.14229) 967 : cluster [DBG] pgmap v542: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:50:47.359 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:47 vm09 bash[34466]: cluster 2026-04-15T13:50:45.836930+0000 mgr.vm06.qbbldl (mgr.14229) 968 : cluster [DBG] pgmap v543: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:50:47.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:47 vm06 bash[28114]: cluster 2026-04-15T13:50:45.836930+0000 mgr.vm06.qbbldl (mgr.14229) 968 : cluster [DBG] pgmap v543: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:50:47.841 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:50:48.056 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:50:48.056 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (14m) 9m ago 15m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:50:48.056 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (9m) 9m ago 15m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:50:48.056 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (3m) 3m ago 15m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:50:48.056 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 3m ago 15m - -
2026-04-15T13:50:48.324 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:50:48.324 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:50:48.324 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:50:49.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:49 vm06 bash[28114]: audit 2026-04-15T13:50:47.815365+0000 mgr.vm06.qbbldl (mgr.14229) 969 : audit [DBG] from='client.16742 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:49.266 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:49 vm06 bash[28114]: cluster 2026-04-15T13:50:47.837333+0000 mgr.vm06.qbbldl (mgr.14229) 970 : cluster [DBG] pgmap v544: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:50:49.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:49 vm06 bash[28114]: audit 2026-04-15T13:50:48.050001+0000 mgr.vm06.qbbldl (mgr.14229) 971 : audit [DBG] from='client.16746 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:49.267 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:49 vm06 bash[28114]: audit 2026-04-15T13:50:48.320211+0000 mon.vm06 (mon.0) 1307 : audit [DBG] from='client.? 192.168.123.106:0/3437627951' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:50:49.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:49 vm09 bash[34466]: audit 2026-04-15T13:50:47.815365+0000 mgr.vm06.qbbldl (mgr.14229) 969 : audit [DBG] from='client.16742 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:49.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:49 vm09 bash[34466]: cluster 2026-04-15T13:50:47.837333+0000 mgr.vm06.qbbldl (mgr.14229) 970 : cluster [DBG] pgmap v544: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:50:49.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:49 vm09 bash[34466]: audit 2026-04-15T13:50:48.050001+0000 mgr.vm06.qbbldl (mgr.14229) 971 : audit [DBG] from='client.16746 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:49.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:49 vm09 bash[34466]: audit 2026-04-15T13:50:48.320211+0000 mon.vm06 (mon.0) 1307 : audit [DBG] from='client.? 192.168.123.106:0/3437627951' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:50:51.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:51 vm06 bash[28114]: cluster 2026-04-15T13:50:49.837761+0000 mgr.vm06.qbbldl (mgr.14229) 972 : cluster [DBG] pgmap v545: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:50:51.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:51 vm09 bash[34466]: cluster 2026-04-15T13:50:49.837761+0000 mgr.vm06.qbbldl (mgr.14229) 972 : cluster [DBG] pgmap v545: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:50:53.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:53 vm06 bash[28114]: cluster 2026-04-15T13:50:51.838367+0000 mgr.vm06.qbbldl (mgr.14229) 973 : cluster [DBG] pgmap v546: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:50:53.554 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:50:53.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:53 vm09 bash[34466]: cluster 2026-04-15T13:50:51.838367+0000 mgr.vm06.qbbldl (mgr.14229) 973 : cluster [DBG] pgmap v546: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:50:53.757 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:50:53.757 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (14m) 9m ago 15m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:50:53.757 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (9m) 9m ago 15m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:50:53.757 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 3m ago 15m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:50:53.757 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 3m ago 15m - -
2026-04-15T13:50:54.007 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:50:54.007 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:50:54.007 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:50:54.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:54 vm06 bash[28114]: audit 2026-04-15T13:50:53.496190+0000 mon.vm06 (mon.0) 1308 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:50:54.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:54 vm06 bash[28114]: audit 2026-04-15T13:50:54.003907+0000 mon.vm06 (mon.0) 1309 : audit [DBG] from='client.? 192.168.123.106:0/1725703807' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:50:54.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:54 vm09 bash[34466]: audit 2026-04-15T13:50:53.496190+0000 mon.vm06 (mon.0) 1308 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:50:54.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:54 vm09 bash[34466]: audit 2026-04-15T13:50:54.003907+0000 mon.vm06 (mon.0) 1309 : audit [DBG] from='client.? 192.168.123.106:0/1725703807' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:50:55.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:55 vm06 bash[28114]: audit 2026-04-15T13:50:53.530719+0000 mgr.vm06.qbbldl (mgr.14229) 974 : audit [DBG] from='client.16754 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:55.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:55 vm06 bash[28114]: audit 2026-04-15T13:50:53.750999+0000 mgr.vm06.qbbldl (mgr.14229) 975 : audit [DBG] from='client.16758 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:55.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:55 vm06 bash[28114]: cluster 2026-04-15T13:50:53.838919+0000 mgr.vm06.qbbldl (mgr.14229) 976 : cluster [DBG] pgmap v547: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:50:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:55 vm09 bash[34466]: audit 2026-04-15T13:50:53.530719+0000 mgr.vm06.qbbldl (mgr.14229) 974 : audit [DBG] from='client.16754 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:55 vm09 bash[34466]: audit 2026-04-15T13:50:53.750999+0000 mgr.vm06.qbbldl (mgr.14229) 975 : audit [DBG] from='client.16758 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:50:55.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:55 vm09 bash[34466]: cluster 2026-04-15T13:50:53.838919+0000 mgr.vm06.qbbldl (mgr.14229) 976 : cluster [DBG] pgmap v547: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:50:57.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:57 vm06 bash[28114]: cluster 2026-04-15T13:50:55.839542+0000 mgr.vm06.qbbldl (mgr.14229) 977 : cluster [DBG] pgmap v548: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:50:57.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:57 vm09 bash[34466]: cluster 2026-04-15T13:50:55.839542+0000 mgr.vm06.qbbldl (mgr.14229) 977 : cluster [DBG] pgmap v548: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:50:59.263 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:50:59.469 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:50:59.469 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (14m) 9m ago 15m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:50:59.469 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (9m) 9m ago 15m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:50:59.469 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 3m ago 15m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:50:59.469 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 3m ago 15m - -
2026-04-15T13:50:59.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:50:59 vm06 bash[28114]: cluster 2026-04-15T13:50:57.839988+0000 mgr.vm06.qbbldl (mgr.14229) 978 : cluster [DBG] pgmap v549: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:50:59.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:50:59 vm09 bash[34466]: cluster 2026-04-15T13:50:57.839988+0000 mgr.vm06.qbbldl (mgr.14229) 978 : cluster [DBG] pgmap v549: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:50:59.736 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:50:59.736 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:50:59.736 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:51:00.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:00 vm06 bash[28114]: audit 2026-04-15T13:50:59.236373+0000 mgr.vm06.qbbldl (mgr.14229) 979 : audit [DBG] from='client.16766 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:00.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:00 vm06 bash[28114]: audit 2026-04-15T13:50:59.462223+0000 mgr.vm06.qbbldl (mgr.14229) 980 : audit [DBG] from='client.16770 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:00.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:00 vm06 bash[28114]: audit 2026-04-15T13:50:59.732912+0000 mon.vm06 (mon.0) 1310 : audit [DBG] from='client.? 192.168.123.106:0/3483374718' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:51:00.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:00 vm09 bash[34466]: audit 2026-04-15T13:50:59.236373+0000 mgr.vm06.qbbldl (mgr.14229) 979 : audit [DBG] from='client.16766 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:00.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:00 vm09 bash[34466]: audit 2026-04-15T13:50:59.462223+0000 mgr.vm06.qbbldl (mgr.14229) 980 : audit [DBG] from='client.16770 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:00.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:00 vm09 bash[34466]: audit 2026-04-15T13:50:59.732912+0000 mon.vm06 (mon.0) 1310 : audit [DBG] from='client.? 
192.168.123.106:0/3483374718' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:51:00.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:00 vm09 bash[34466]: audit 2026-04-15T13:50:59.732912+0000 mon.vm06 (mon.0) 1310 : audit [DBG] from='client.? 192.168.123.106:0/3483374718' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:51:01.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:01 vm06 bash[28114]: cluster 2026-04-15T13:50:59.840544+0000 mgr.vm06.qbbldl (mgr.14229) 981 : cluster [DBG] pgmap v550: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:51:01.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:01 vm06 bash[28114]: cluster 2026-04-15T13:50:59.840544+0000 mgr.vm06.qbbldl (mgr.14229) 981 : cluster [DBG] pgmap v550: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:51:01.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:01 vm09 bash[34466]: cluster 2026-04-15T13:50:59.840544+0000 mgr.vm06.qbbldl (mgr.14229) 981 : cluster [DBG] pgmap v550: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:51:01.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:01 vm09 bash[34466]: cluster 2026-04-15T13:50:59.840544+0000 mgr.vm06.qbbldl (mgr.14229) 981 : cluster [DBG] pgmap v550: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:51:03.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:03 vm06 bash[28114]: cluster 2026-04-15T13:51:01.841156+0000 mgr.vm06.qbbldl (mgr.14229) 982 : cluster [DBG] pgmap v551: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:51:03.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:03 vm06 bash[28114]: cluster 2026-04-15T13:51:01.841156+0000 mgr.vm06.qbbldl (mgr.14229) 982 : cluster [DBG] pgmap v551: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:51:03.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:03 vm09 bash[34466]: cluster 2026-04-15T13:51:01.841156+0000 mgr.vm06.qbbldl (mgr.14229) 982 : cluster [DBG] pgmap v551: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:51:03.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:03 vm09 bash[34466]: cluster 2026-04-15T13:51:01.841156+0000 mgr.vm06.qbbldl (mgr.14229) 982 : cluster [DBG] pgmap v551: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:51:04.962 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop 2026-04-15T13:51:05.166 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:51:05.166 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (14m) 9m ago 15m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:51:05.166 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (9m) 9m ago 15m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:51:05.167 
INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 3m ago 15m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111 2026-04-15T13:51:05.167 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 3m ago 15m - - 2026-04-15T13:51:05.432 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:51:05.432 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:51:05.432 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state 2026-04-15T13:51:05.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:05 vm06 bash[28114]: cluster 2026-04-15T13:51:03.841583+0000 mgr.vm06.qbbldl (mgr.14229) 983 : cluster [DBG] pgmap v552: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:51:05.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:05 vm06 bash[28114]: cluster 2026-04-15T13:51:03.841583+0000 mgr.vm06.qbbldl (mgr.14229) 983 : cluster [DBG] pgmap v552: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:51:05.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:05 vm09 bash[34466]: cluster 2026-04-15T13:51:03.841583+0000 mgr.vm06.qbbldl (mgr.14229) 983 : cluster [DBG] pgmap v552: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:51:05.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:05 vm09 bash[34466]: cluster 2026-04-15T13:51:03.841583+0000 mgr.vm06.qbbldl (mgr.14229) 983 : cluster [DBG] pgmap v552: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:51:06.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:06 vm06 bash[28114]: audit 2026-04-15T13:51:04.939003+0000 mgr.vm06.qbbldl (mgr.14229) 984 : audit [DBG] from='client.16778 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:06.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:06 vm06 bash[28114]: audit 2026-04-15T13:51:04.939003+0000 mgr.vm06.qbbldl (mgr.14229) 984 : audit [DBG] from='client.16778 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:06.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:06 vm06 bash[28114]: audit 2026-04-15T13:51:05.160254+0000 mgr.vm06.qbbldl (mgr.14229) 985 : audit [DBG] from='client.16782 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:06.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:06 vm06 bash[28114]: audit 2026-04-15T13:51:05.160254+0000 mgr.vm06.qbbldl (mgr.14229) 985 : audit [DBG] from='client.16782 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:06.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:06 vm06 bash[28114]: audit 2026-04-15T13:51:05.428815+0000 mon.vm06 (mon.0) 1311 : audit [DBG] from='client.? 
192.168.123.106:0/1102281927' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:51:06.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:06 vm06 bash[28114]: audit 2026-04-15T13:51:05.428815+0000 mon.vm06 (mon.0) 1311 : audit [DBG] from='client.? 192.168.123.106:0/1102281927' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:51:06.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:06 vm09 bash[34466]: audit 2026-04-15T13:51:04.939003+0000 mgr.vm06.qbbldl (mgr.14229) 984 : audit [DBG] from='client.16778 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:06.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:06 vm09 bash[34466]: audit 2026-04-15T13:51:04.939003+0000 mgr.vm06.qbbldl (mgr.14229) 984 : audit [DBG] from='client.16778 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:06.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:06 vm09 bash[34466]: audit 2026-04-15T13:51:05.160254+0000 mgr.vm06.qbbldl (mgr.14229) 985 : audit [DBG] from='client.16782 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:06.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:06 vm09 bash[34466]: audit 2026-04-15T13:51:05.160254+0000 mgr.vm06.qbbldl (mgr.14229) 985 : audit [DBG] from='client.16782 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:06.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:06 vm09 bash[34466]: audit 2026-04-15T13:51:05.428815+0000 mon.vm06 (mon.0) 1311 : audit [DBG] from='client.? 192.168.123.106:0/1102281927' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:51:06.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:06 vm09 bash[34466]: audit 2026-04-15T13:51:05.428815+0000 mon.vm06 (mon.0) 1311 : audit [DBG] from='client.? 
192.168.123.106:0/1102281927' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:51:07.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:07 vm06 bash[28114]: cluster 2026-04-15T13:51:05.842079+0000 mgr.vm06.qbbldl (mgr.14229) 986 : cluster [DBG] pgmap v553: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:51:07.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:07 vm06 bash[28114]: cluster 2026-04-15T13:51:05.842079+0000 mgr.vm06.qbbldl (mgr.14229) 986 : cluster [DBG] pgmap v553: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:51:07.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:07 vm09 bash[34466]: cluster 2026-04-15T13:51:05.842079+0000 mgr.vm06.qbbldl (mgr.14229) 986 : cluster [DBG] pgmap v553: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:51:07.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:07 vm09 bash[34466]: cluster 2026-04-15T13:51:05.842079+0000 mgr.vm06.qbbldl (mgr.14229) 986 : cluster [DBG] pgmap v553: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:51:09.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:09 vm06 bash[28114]: cluster 2026-04-15T13:51:07.842472+0000 mgr.vm06.qbbldl (mgr.14229) 987 : cluster [DBG] pgmap v554: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:51:09.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:09 vm06 bash[28114]: cluster 2026-04-15T13:51:07.842472+0000 mgr.vm06.qbbldl (mgr.14229) 987 : cluster [DBG] pgmap v554: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:51:09.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:09 vm06 bash[28114]: audit 2026-04-15T13:51:08.496240+0000 mon.vm06 (mon.0) 1312 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:51:09.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:09 vm06 bash[28114]: audit 2026-04-15T13:51:08.496240+0000 mon.vm06 (mon.0) 1312 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:51:09.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:09 vm09 bash[34466]: cluster 2026-04-15T13:51:07.842472+0000 mgr.vm06.qbbldl (mgr.14229) 987 : cluster [DBG] pgmap v554: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:51:09.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:09 vm09 bash[34466]: cluster 2026-04-15T13:51:07.842472+0000 mgr.vm06.qbbldl (mgr.14229) 987 : cluster [DBG] pgmap v554: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail 2026-04-15T13:51:09.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:09 vm09 bash[34466]: audit 2026-04-15T13:51:08.496240+0000 mon.vm06 (mon.0) 1312 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:51:09.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:09 vm09 bash[34466]: audit 2026-04-15T13:51:08.496240+0000 mon.vm06 (mon.0) 1312 
: audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:51:10.651 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop 2026-04-15T13:51:10.849 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:51:10.849 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (14m) 9m ago 15m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:51:10.849 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (9m) 9m ago 15m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:51:10.849 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 4m ago 15m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111 2026-04-15T13:51:10.849 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 4m ago 15m - - 2026-04-15T13:51:11.092 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:51:11.092 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:51:11.092 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state 2026-04-15T13:51:11.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:11 vm06 bash[28114]: cluster 2026-04-15T13:51:09.843039+0000 mgr.vm06.qbbldl (mgr.14229) 988 : cluster [DBG] pgmap v555: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:51:11.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:11 vm06 bash[28114]: cluster 2026-04-15T13:51:09.843039+0000 mgr.vm06.qbbldl (mgr.14229) 988 : cluster [DBG] pgmap v555: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:51:11.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:11 vm06 bash[28114]: audit 2026-04-15T13:51:11.088760+0000 mon.vm06 (mon.0) 1313 : audit [DBG] from='client.? 192.168.123.106:0/2384645653' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:51:11.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:11 vm06 bash[28114]: audit 2026-04-15T13:51:11.088760+0000 mon.vm06 (mon.0) 1313 : audit [DBG] from='client.? 192.168.123.106:0/2384645653' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:51:11.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:11 vm09 bash[34466]: cluster 2026-04-15T13:51:09.843039+0000 mgr.vm06.qbbldl (mgr.14229) 988 : cluster [DBG] pgmap v555: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:51:11.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:11 vm09 bash[34466]: cluster 2026-04-15T13:51:09.843039+0000 mgr.vm06.qbbldl (mgr.14229) 988 : cluster [DBG] pgmap v555: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:51:11.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:11 vm09 bash[34466]: audit 2026-04-15T13:51:11.088760+0000 mon.vm06 (mon.0) 1313 : audit [DBG] from='client.? 
192.168.123.106:0/2384645653' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:51:11.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:11 vm09 bash[34466]: audit 2026-04-15T13:51:11.088760+0000 mon.vm06 (mon.0) 1313 : audit [DBG] from='client.? 192.168.123.106:0/2384645653' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:51:12.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:12 vm06 bash[28114]: audit 2026-04-15T13:51:10.630525+0000 mgr.vm06.qbbldl (mgr.14229) 989 : audit [DBG] from='client.16790 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:12.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:12 vm06 bash[28114]: audit 2026-04-15T13:51:10.630525+0000 mgr.vm06.qbbldl (mgr.14229) 989 : audit [DBG] from='client.16790 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:12.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:12 vm06 bash[28114]: audit 2026-04-15T13:51:10.841296+0000 mgr.vm06.qbbldl (mgr.14229) 990 : audit [DBG] from='client.16794 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:12.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:12 vm06 bash[28114]: audit 2026-04-15T13:51:10.841296+0000 mgr.vm06.qbbldl (mgr.14229) 990 : audit [DBG] from='client.16794 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:12.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:12 vm06 bash[28114]: audit 2026-04-15T13:51:11.849738+0000 mon.vm06 (mon.0) 1314 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:51:12.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:12 vm06 bash[28114]: audit 2026-04-15T13:51:11.849738+0000 mon.vm06 (mon.0) 1314 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:51:12.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:12 vm06 bash[28114]: audit 2026-04-15T13:51:12.208814+0000 mon.vm06 (mon.0) 1315 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:51:12.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:12 vm06 bash[28114]: audit 2026-04-15T13:51:12.208814+0000 mon.vm06 (mon.0) 1315 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:51:12.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:12 vm06 bash[28114]: audit 2026-04-15T13:51:12.209429+0000 mon.vm06 (mon.0) 1316 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:51:12.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:12 vm06 bash[28114]: audit 2026-04-15T13:51:12.209429+0000 mon.vm06 (mon.0) 1316 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:51:12.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:12 vm06 bash[28114]: 
audit 2026-04-15T13:51:12.215135+0000 mon.vm06 (mon.0) 1317 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:51:12.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:12 vm06 bash[28114]: audit 2026-04-15T13:51:12.215135+0000 mon.vm06 (mon.0) 1317 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:51:12.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:12 vm06 bash[28114]: audit 2026-04-15T13:51:12.216778+0000 mon.vm06 (mon.0) 1318 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:51:12.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:12 vm06 bash[28114]: audit 2026-04-15T13:51:12.216778+0000 mon.vm06 (mon.0) 1318 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:51:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:12 vm09 bash[34466]: audit 2026-04-15T13:51:10.630525+0000 mgr.vm06.qbbldl (mgr.14229) 989 : audit [DBG] from='client.16790 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:12 vm09 bash[34466]: audit 2026-04-15T13:51:10.630525+0000 mgr.vm06.qbbldl (mgr.14229) 989 : audit [DBG] from='client.16790 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:12 vm09 bash[34466]: audit 2026-04-15T13:51:10.841296+0000 mgr.vm06.qbbldl (mgr.14229) 990 : audit [DBG] from='client.16794 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:12 vm09 bash[34466]: audit 2026-04-15T13:51:10.841296+0000 mgr.vm06.qbbldl (mgr.14229) 990 : audit [DBG] from='client.16794 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:12 vm09 bash[34466]: audit 2026-04-15T13:51:11.849738+0000 mon.vm06 (mon.0) 1314 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:51:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:12 vm09 bash[34466]: audit 2026-04-15T13:51:11.849738+0000 mon.vm06 (mon.0) 1314 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:51:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:12 vm09 bash[34466]: audit 2026-04-15T13:51:12.208814+0000 mon.vm06 (mon.0) 1315 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:51:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:12 vm09 bash[34466]: audit 2026-04-15T13:51:12.208814+0000 mon.vm06 (mon.0) 1315 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:51:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:12 
vm09 bash[34466]: audit 2026-04-15T13:51:12.209429+0000 mon.vm06 (mon.0) 1316 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:51:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:12 vm09 bash[34466]: audit 2026-04-15T13:51:12.209429+0000 mon.vm06 (mon.0) 1316 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:51:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:12 vm09 bash[34466]: audit 2026-04-15T13:51:12.215135+0000 mon.vm06 (mon.0) 1317 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:51:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:12 vm09 bash[34466]: audit 2026-04-15T13:51:12.215135+0000 mon.vm06 (mon.0) 1317 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:51:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:12 vm09 bash[34466]: audit 2026-04-15T13:51:12.216778+0000 mon.vm06 (mon.0) 1318 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:51:12.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:12 vm09 bash[34466]: audit 2026-04-15T13:51:12.216778+0000 mon.vm06 (mon.0) 1318 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:51:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:13 vm06 bash[28114]: cluster 2026-04-15T13:51:11.843612+0000 mgr.vm06.qbbldl (mgr.14229) 991 : cluster [DBG] pgmap v556: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:51:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:13 vm06 bash[28114]: cluster 2026-04-15T13:51:11.843612+0000 mgr.vm06.qbbldl (mgr.14229) 991 : cluster [DBG] pgmap v556: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:51:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:13 vm06 bash[28114]: cluster 2026-04-15T13:51:12.210608+0000 mgr.vm06.qbbldl (mgr.14229) 992 : cluster [DBG] pgmap v557: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s 2026-04-15T13:51:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:13 vm06 bash[28114]: cluster 2026-04-15T13:51:12.210608+0000 mgr.vm06.qbbldl (mgr.14229) 992 : cluster [DBG] pgmap v557: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s 2026-04-15T13:51:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:13 vm06 bash[28114]: cluster 2026-04-15T13:51:12.210704+0000 mgr.vm06.qbbldl (mgr.14229) 993 : cluster [DBG] pgmap v558: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s 2026-04-15T13:51:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:13 vm06 bash[28114]: cluster 2026-04-15T13:51:12.210704+0000 mgr.vm06.qbbldl (mgr.14229) 993 : cluster [DBG] pgmap v558: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s 
2026-04-15T13:51:13.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:13 vm09 bash[34466]: cluster 2026-04-15T13:51:11.843612+0000 mgr.vm06.qbbldl (mgr.14229) 991 : cluster [DBG] pgmap v556: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:51:13.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:13 vm09 bash[34466]: cluster 2026-04-15T13:51:12.210608+0000 mgr.vm06.qbbldl (mgr.14229) 992 : cluster [DBG] pgmap v557: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 98 B/s rd, 197 B/s wr, 0 op/s
2026-04-15T13:51:13.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:13 vm09 bash[34466]: cluster 2026-04-15T13:51:12.210704+0000 mgr.vm06.qbbldl (mgr.14229) 993 : cluster [DBG] pgmap v558: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s
2026-04-15T13:51:15.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:15 vm06 bash[28114]: cluster 2026-04-15T13:51:14.211086+0000 mgr.vm06.qbbldl (mgr.14229) 994 : cluster [DBG] pgmap v559: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s
2026-04-15T13:51:15.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:15 vm09 bash[34466]: cluster 2026-04-15T13:51:14.211086+0000 mgr.vm06.qbbldl (mgr.14229) 994 : cluster [DBG] pgmap v559: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s
2026-04-15T13:51:16.316 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:51:16.529 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:51:16.529 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (14m) 9m ago 15m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:51:16.529 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (9m) 9m ago 15m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:51:16.529 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 4m ago 15m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:51:16.529 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 4m ago 15m - -
2026-04-15T13:51:16.787 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:51:16.787 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:51:16.787 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:51:17.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:17 vm06 bash[28114]: cluster 2026-04-15T13:51:16.211588+0000 mgr.vm06.qbbldl (mgr.14229) 995 : cluster [DBG] pgmap v560: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s
2026-04-15T13:51:17.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:17 vm06 bash[28114]: audit 2026-04-15T13:51:16.293258+0000 mgr.vm06.qbbldl (mgr.14229) 996 : audit [DBG] from='client.16802 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:17.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:17 vm06 bash[28114]: audit 2026-04-15T13:51:16.783372+0000 mon.vm06 (mon.0) 1319 : audit [DBG] from='client.? 192.168.123.106:0/804213555' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:51:17.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:17 vm09 bash[34466]: cluster 2026-04-15T13:51:16.211588+0000 mgr.vm06.qbbldl (mgr.14229) 995 : cluster [DBG] pgmap v560: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 122 B/s rd, 244 B/s wr, 0 op/s
2026-04-15T13:51:17.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:17 vm09 bash[34466]: audit 2026-04-15T13:51:16.293258+0000 mgr.vm06.qbbldl (mgr.14229) 996 : audit [DBG] from='client.16802 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:17.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:17 vm09 bash[34466]: audit 2026-04-15T13:51:16.783372+0000 mon.vm06 (mon.0) 1319 : audit [DBG] from='client.? 192.168.123.106:0/804213555' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:51:18.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:18 vm06 bash[28114]: audit 2026-04-15T13:51:16.522878+0000 mgr.vm06.qbbldl (mgr.14229) 997 : audit [DBG] from='client.16806 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:18.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:18 vm09 bash[34466]: audit 2026-04-15T13:51:16.522878+0000 mgr.vm06.qbbldl (mgr.14229) 997 : audit [DBG] from='client.16806 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:19.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:19 vm06 bash[28114]: cluster 2026-04-15T13:51:18.212104+0000 mgr.vm06.qbbldl (mgr.14229) 998 : cluster [DBG] pgmap v561: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:51:19.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:19 vm09 bash[34466]: cluster 2026-04-15T13:51:18.212104+0000 mgr.vm06.qbbldl (mgr.14229) 998 : cluster [DBG] pgmap v561: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:51:21.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:21 vm06 bash[28114]: cluster 2026-04-15T13:51:20.212532+0000 mgr.vm06.qbbldl (mgr.14229) 999 : cluster [DBG] pgmap v562: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 0 B/s wr, 2 op/s
2026-04-15T13:51:21.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:21 vm09 bash[34466]: cluster 2026-04-15T13:51:20.212532+0000 mgr.vm06.qbbldl (mgr.14229) 999 : cluster [DBG] pgmap v562: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 1.1 KiB/s rd, 0 B/s wr, 2 op/s
2026-04-15T13:51:22.020 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:51:22.220 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:51:22.220 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (15m) 9m ago 15m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:51:22.220 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (9m) 9m ago 15m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:51:22.220 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 4m ago 15m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:51:22.220 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 4m ago 15m - -
2026-04-15T13:51:22.454 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:51:22.454 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:51:22.454 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:51:22.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:22 vm06 bash[28114]: audit 2026-04-15T13:51:21.998080+0000 mgr.vm06.qbbldl (mgr.14229) 1000 : audit [DBG] from='client.16814 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:22.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:22 vm06 bash[28114]: cluster 2026-04-15T13:51:22.212898+0000 mgr.vm06.qbbldl (mgr.14229) 1001 : cluster [DBG] pgmap v563: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 2.7 KiB/s rd, 0 B/s wr, 5 op/s
2026-04-15T13:51:22.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:22 vm06 bash[28114]: audit 2026-04-15T13:51:22.214452+0000 mgr.vm06.qbbldl (mgr.14229) 1002 : audit [DBG] from='client.16818 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:22.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:22 vm06 bash[28114]: audit 2026-04-15T13:51:22.450506+0000 mon.vm06 (mon.0) 1320 : audit [DBG] from='client.? 192.168.123.106:0/1787264651' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:51:22.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:22 vm09 bash[34466]: audit 2026-04-15T13:51:21.998080+0000 mgr.vm06.qbbldl (mgr.14229) 1000 : audit [DBG] from='client.16814 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:22.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:22 vm09 bash[34466]: cluster 2026-04-15T13:51:22.212898+0000 mgr.vm06.qbbldl (mgr.14229) 1001 : cluster [DBG] pgmap v563: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 2.7 KiB/s rd, 0 B/s wr, 5 op/s
2026-04-15T13:51:22.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:22 vm09 bash[34466]: audit 2026-04-15T13:51:22.214452+0000 mgr.vm06.qbbldl (mgr.14229) 1002 : audit [DBG] from='client.16818 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:22.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:22 vm09 bash[34466]: audit 2026-04-15T13:51:22.450506+0000 mon.vm06 (mon.0) 1320 : audit [DBG] from='client.? 192.168.123.106:0/1787264651' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:51:23.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:23 vm09 bash[34466]: audit 2026-04-15T13:51:23.496578+0000 mon.vm06 (mon.0) 1321 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:51:24.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:23 vm06 bash[28114]: audit 2026-04-15T13:51:23.496578+0000 mon.vm06 (mon.0) 1321 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:51:25.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:24 vm06 bash[28114]: cluster 2026-04-15T13:51:24.213336+0000 mgr.vm06.qbbldl (mgr.14229) 1003 : cluster [DBG] pgmap v564: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 6.2 KiB/s rd, 341 B/s wr, 12 op/s
2026-04-15T13:51:25.109 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:24 vm09 bash[34466]: cluster 2026-04-15T13:51:24.213336+0000 mgr.vm06.qbbldl (mgr.14229) 1003 : cluster [DBG] pgmap v564: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 6.2 KiB/s rd, 341 B/s wr, 12 op/s
2026-04-15T13:51:27.680 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:51:27.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:27 vm06 bash[28114]: cluster 2026-04-15T13:51:26.213820+0000 mgr.vm06.qbbldl (mgr.14229) 1004 : cluster [DBG] pgmap v565: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.2 KiB/s rd, 341 B/s wr, 16 op/s
2026-04-15T13:51:27.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:27 vm09 bash[34466]: cluster 2026-04-15T13:51:26.213820+0000 mgr.vm06.qbbldl (mgr.14229) 1004 : cluster [DBG] pgmap v565: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.2 KiB/s rd, 341 B/s wr, 16 op/s
2026-04-15T13:51:27.867 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:51:27.867 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (15m) 9m ago 15m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:51:27.867 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (9m) 9m ago 16m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:51:27.867 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 4m ago 16m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:51:27.867 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 4m ago 15m - -
2026-04-15T13:51:28.128 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:51:28.128 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:51:28.128 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:51:28.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:28 vm06 bash[28114]: audit 2026-04-15T13:51:28.124794+0000 mon.vm06 (mon.0) 1322 : audit [DBG] from='client.? 192.168.123.106:0/719364905' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:51:28.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:28 vm09 bash[34466]: audit 2026-04-15T13:51:28.124794+0000 mon.vm06 (mon.0) 1322 : audit [DBG] from='client.? 192.168.123.106:0/719364905' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:51:29.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:29 vm06 bash[28114]: audit 2026-04-15T13:51:27.659085+0000 mgr.vm06.qbbldl (mgr.14229) 1005 : audit [DBG] from='client.16826 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:29.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:29 vm06 bash[28114]: audit 2026-04-15T13:51:27.860329+0000 mgr.vm06.qbbldl (mgr.14229) 1006 : audit [DBG] from='client.16830 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:29.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:29 vm06 bash[28114]: cluster 2026-04-15T13:51:28.214362+0000 mgr.vm06.qbbldl (mgr.14229) 1007 : cluster [DBG] pgmap v566: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.2 KiB/s rd, 341 B/s wr, 16 op/s
2026-04-15T13:51:29.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:29 vm09 bash[34466]: audit 2026-04-15T13:51:27.659085+0000 mgr.vm06.qbbldl (mgr.14229) 1005 : audit [DBG] from='client.16826 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:29.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:29 vm09 bash[34466]: audit 2026-04-15T13:51:27.860329+0000 mgr.vm06.qbbldl (mgr.14229) 1006 : audit [DBG] from='client.16830 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:29.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:29 vm09 bash[34466]: cluster 2026-04-15T13:51:28.214362+0000 mgr.vm06.qbbldl (mgr.14229) 1007 : cluster [DBG] pgmap v566: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.2 KiB/s rd, 341 B/s wr, 16 op/s
2026-04-15T13:51:31.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:31 vm06 bash[28114]: cluster 2026-04-15T13:51:30.214857+0000 mgr.vm06.qbbldl (mgr.14229) 1008 : cluster [DBG] pgmap v567: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.2 KiB/s rd, 341 B/s wr, 16 op/s
2026-04-15T13:51:31.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:31 vm09 bash[34466]: cluster 2026-04-15T13:51:30.214857+0000 mgr.vm06.qbbldl (mgr.14229) 1008 : cluster [DBG] pgmap v567: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.2 KiB/s rd, 341 B/s wr, 16 op/s
2026-04-15T13:51:33.348 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:51:33.557 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:51:33.558 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (15m) 9m ago 16m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:51:33.558 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (9m) 9m ago 16m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:51:33.558 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 4m ago 16m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:51:33.558 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 4m ago 16m - -
2026-04-15T13:51:33.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:33 vm06 bash[28114]: cluster 2026-04-15T13:51:32.215286+0000 mgr.vm06.qbbldl (mgr.14229) 1009 : cluster [DBG] pgmap v568: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 7.4 KiB/s rd, 341 B/s wr, 14 op/s
2026-04-15T13:51:33.805 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:51:33.805 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:51:33.805 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:51:33.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:33 vm09 bash[34466]: cluster
2026-04-15T13:51:32.215286+0000 mgr.vm06.qbbldl (mgr.14229) 1009 : cluster [DBG] pgmap v568: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 7.4 KiB/s rd, 341 B/s wr, 14 op/s 2026-04-15T13:51:33.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:33 vm09 bash[34466]: cluster 2026-04-15T13:51:32.215286+0000 mgr.vm06.qbbldl (mgr.14229) 1009 : cluster [DBG] pgmap v568: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 7.4 KiB/s rd, 341 B/s wr, 14 op/s 2026-04-15T13:51:34.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:34 vm09 bash[34466]: audit 2026-04-15T13:51:33.327200+0000 mgr.vm06.qbbldl (mgr.14229) 1010 : audit [DBG] from='client.16838 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:34.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:34 vm09 bash[34466]: audit 2026-04-15T13:51:33.327200+0000 mgr.vm06.qbbldl (mgr.14229) 1010 : audit [DBG] from='client.16838 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:34.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:34 vm09 bash[34466]: audit 2026-04-15T13:51:33.551702+0000 mgr.vm06.qbbldl (mgr.14229) 1011 : audit [DBG] from='client.16842 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:34.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:34 vm09 bash[34466]: audit 2026-04-15T13:51:33.551702+0000 mgr.vm06.qbbldl (mgr.14229) 1011 : audit [DBG] from='client.16842 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:34.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:34 vm09 bash[34466]: audit 2026-04-15T13:51:33.801616+0000 mon.vm06 (mon.0) 1323 : audit [DBG] from='client.? 192.168.123.106:0/2827339895' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:51:34.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:34 vm09 bash[34466]: audit 2026-04-15T13:51:33.801616+0000 mon.vm06 (mon.0) 1323 : audit [DBG] from='client.? 
192.168.123.106:0/2827339895' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:51:34.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:34 vm09 bash[34466]: cluster 2026-04-15T13:51:34.215754+0000 mgr.vm06.qbbldl (mgr.14229) 1012 : cluster [DBG] pgmap v569: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 5.9 KiB/s rd, 341 B/s wr, 11 op/s 2026-04-15T13:51:34.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:34 vm09 bash[34466]: cluster 2026-04-15T13:51:34.215754+0000 mgr.vm06.qbbldl (mgr.14229) 1012 : cluster [DBG] pgmap v569: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 5.9 KiB/s rd, 341 B/s wr, 11 op/s 2026-04-15T13:51:35.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:34 vm06 bash[28114]: audit 2026-04-15T13:51:33.327200+0000 mgr.vm06.qbbldl (mgr.14229) 1010 : audit [DBG] from='client.16838 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:35.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:34 vm06 bash[28114]: audit 2026-04-15T13:51:33.327200+0000 mgr.vm06.qbbldl (mgr.14229) 1010 : audit [DBG] from='client.16838 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:35.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:34 vm06 bash[28114]: audit 2026-04-15T13:51:33.551702+0000 mgr.vm06.qbbldl (mgr.14229) 1011 : audit [DBG] from='client.16842 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:35.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:34 vm06 bash[28114]: audit 2026-04-15T13:51:33.551702+0000 mgr.vm06.qbbldl (mgr.14229) 1011 : audit [DBG] from='client.16842 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:35.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:34 vm06 bash[28114]: audit 2026-04-15T13:51:33.801616+0000 mon.vm06 (mon.0) 1323 : audit [DBG] from='client.? 192.168.123.106:0/2827339895' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:51:35.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:34 vm06 bash[28114]: audit 2026-04-15T13:51:33.801616+0000 mon.vm06 (mon.0) 1323 : audit [DBG] from='client.? 
192.168.123.106:0/2827339895' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:51:35.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:34 vm06 bash[28114]: cluster 2026-04-15T13:51:34.215754+0000 mgr.vm06.qbbldl (mgr.14229) 1012 : cluster [DBG] pgmap v569: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 5.9 KiB/s rd, 341 B/s wr, 11 op/s 2026-04-15T13:51:35.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:34 vm06 bash[28114]: cluster 2026-04-15T13:51:34.215754+0000 mgr.vm06.qbbldl (mgr.14229) 1012 : cluster [DBG] pgmap v569: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 5.9 KiB/s rd, 341 B/s wr, 11 op/s 2026-04-15T13:51:37.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:37 vm09 bash[34466]: cluster 2026-04-15T13:51:36.216236+0000 mgr.vm06.qbbldl (mgr.14229) 1013 : cluster [DBG] pgmap v570: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 2.0 KiB/s rd, 0 B/s wr, 3 op/s 2026-04-15T13:51:37.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:37 vm09 bash[34466]: cluster 2026-04-15T13:51:36.216236+0000 mgr.vm06.qbbldl (mgr.14229) 1013 : cluster [DBG] pgmap v570: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 2.0 KiB/s rd, 0 B/s wr, 3 op/s 2026-04-15T13:51:37.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:37 vm06 bash[28114]: cluster 2026-04-15T13:51:36.216236+0000 mgr.vm06.qbbldl (mgr.14229) 1013 : cluster [DBG] pgmap v570: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 2.0 KiB/s rd, 0 B/s wr, 3 op/s 2026-04-15T13:51:37.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:37 vm06 bash[28114]: cluster 2026-04-15T13:51:36.216236+0000 mgr.vm06.qbbldl (mgr.14229) 1013 : cluster [DBG] pgmap v570: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 2.0 KiB/s rd, 0 B/s wr, 3 op/s 2026-04-15T13:51:39.027 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop 2026-04-15T13:51:39.216 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:51:39.216 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (15m) 9m ago 16m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:51:39.216 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (10m) 9m ago 16m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:51:39.216 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 4m ago 16m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111 2026-04-15T13:51:39.216 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 4m ago 16m - - 2026-04-15T13:51:39.468 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:51:39.468 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:51:39.468 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state 2026-04-15T13:51:39.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:39 vm09 bash[34466]: cluster 2026-04-15T13:51:38.216751+0000 mgr.vm06.qbbldl (mgr.14229) 1014 : cluster [DBG] pgmap v571: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 B/s wr, 1 
op/s 2026-04-15T13:51:39.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:39 vm09 bash[34466]: cluster 2026-04-15T13:51:38.216751+0000 mgr.vm06.qbbldl (mgr.14229) 1014 : cluster [DBG] pgmap v571: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 B/s wr, 1 op/s 2026-04-15T13:51:39.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:39 vm09 bash[34466]: audit 2026-04-15T13:51:38.496743+0000 mon.vm06 (mon.0) 1324 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:51:39.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:39 vm09 bash[34466]: audit 2026-04-15T13:51:38.496743+0000 mon.vm06 (mon.0) 1324 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:51:39.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:39 vm06 bash[28114]: cluster 2026-04-15T13:51:38.216751+0000 mgr.vm06.qbbldl (mgr.14229) 1014 : cluster [DBG] pgmap v571: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 B/s wr, 1 op/s 2026-04-15T13:51:39.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:39 vm06 bash[28114]: cluster 2026-04-15T13:51:38.216751+0000 mgr.vm06.qbbldl (mgr.14229) 1014 : cluster [DBG] pgmap v571: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 1023 B/s rd, 0 B/s wr, 1 op/s 2026-04-15T13:51:39.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:39 vm06 bash[28114]: audit 2026-04-15T13:51:38.496743+0000 mon.vm06 (mon.0) 1324 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:51:39.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:39 vm06 bash[28114]: audit 2026-04-15T13:51:38.496743+0000 mon.vm06 (mon.0) 1324 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:51:40.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:40 vm09 bash[34466]: audit 2026-04-15T13:51:39.004335+0000 mgr.vm06.qbbldl (mgr.14229) 1015 : audit [DBG] from='client.16850 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:40.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:40 vm09 bash[34466]: audit 2026-04-15T13:51:39.004335+0000 mgr.vm06.qbbldl (mgr.14229) 1015 : audit [DBG] from='client.16850 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:40.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:40 vm09 bash[34466]: audit 2026-04-15T13:51:39.210030+0000 mgr.vm06.qbbldl (mgr.14229) 1016 : audit [DBG] from='client.16854 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:40.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:40 vm09 bash[34466]: audit 2026-04-15T13:51:39.210030+0000 mgr.vm06.qbbldl (mgr.14229) 1016 : audit [DBG] from='client.16854 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:40.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:40 vm09 bash[34466]: audit 2026-04-15T13:51:39.464313+0000 mon.vm06 (mon.0) 1325 
: audit [DBG] from='client.? 192.168.123.106:0/3535166530' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:51:40.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:40 vm09 bash[34466]: audit 2026-04-15T13:51:39.464313+0000 mon.vm06 (mon.0) 1325 : audit [DBG] from='client.? 192.168.123.106:0/3535166530' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:51:40.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:40 vm06 bash[28114]: audit 2026-04-15T13:51:39.004335+0000 mgr.vm06.qbbldl (mgr.14229) 1015 : audit [DBG] from='client.16850 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:40.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:40 vm06 bash[28114]: audit 2026-04-15T13:51:39.004335+0000 mgr.vm06.qbbldl (mgr.14229) 1015 : audit [DBG] from='client.16850 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:40.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:40 vm06 bash[28114]: audit 2026-04-15T13:51:39.210030+0000 mgr.vm06.qbbldl (mgr.14229) 1016 : audit [DBG] from='client.16854 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:40.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:40 vm06 bash[28114]: audit 2026-04-15T13:51:39.210030+0000 mgr.vm06.qbbldl (mgr.14229) 1016 : audit [DBG] from='client.16854 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:40.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:40 vm06 bash[28114]: audit 2026-04-15T13:51:39.464313+0000 mon.vm06 (mon.0) 1325 : audit [DBG] from='client.? 192.168.123.106:0/3535166530' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:51:40.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:40 vm06 bash[28114]: audit 2026-04-15T13:51:39.464313+0000 mon.vm06 (mon.0) 1325 : audit [DBG] from='client.? 
192.168.123.106:0/3535166530' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:51:41.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:41 vm09 bash[34466]: cluster 2026-04-15T13:51:40.217206+0000 mgr.vm06.qbbldl (mgr.14229) 1017 : cluster [DBG] pgmap v572: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 170 B/s wr, 31 op/s 2026-04-15T13:51:41.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:41 vm09 bash[34466]: cluster 2026-04-15T13:51:40.217206+0000 mgr.vm06.qbbldl (mgr.14229) 1017 : cluster [DBG] pgmap v572: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 170 B/s wr, 31 op/s 2026-04-15T13:51:41.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:41 vm06 bash[28114]: cluster 2026-04-15T13:51:40.217206+0000 mgr.vm06.qbbldl (mgr.14229) 1017 : cluster [DBG] pgmap v572: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 170 B/s wr, 31 op/s 2026-04-15T13:51:41.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:41 vm06 bash[28114]: cluster 2026-04-15T13:51:40.217206+0000 mgr.vm06.qbbldl (mgr.14229) 1017 : cluster [DBG] pgmap v572: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 19 KiB/s rd, 170 B/s wr, 31 op/s 2026-04-15T13:51:43.317 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:43 vm09 bash[34466]: cluster 2026-04-15T13:51:42.217610+0000 mgr.vm06.qbbldl (mgr.14229) 1018 : cluster [DBG] pgmap v573: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 170 B/s wr, 41 op/s 2026-04-15T13:51:43.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:43 vm09 bash[34466]: cluster 2026-04-15T13:51:42.217610+0000 mgr.vm06.qbbldl (mgr.14229) 1018 : cluster [DBG] pgmap v573: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 170 B/s wr, 41 op/s 2026-04-15T13:51:43.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:43 vm06 bash[28114]: cluster 2026-04-15T13:51:42.217610+0000 mgr.vm06.qbbldl (mgr.14229) 1018 : cluster [DBG] pgmap v573: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 170 B/s wr, 41 op/s 2026-04-15T13:51:43.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:43 vm06 bash[28114]: cluster 2026-04-15T13:51:42.217610+0000 mgr.vm06.qbbldl (mgr.14229) 1018 : cluster [DBG] pgmap v573: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 25 KiB/s rd, 170 B/s wr, 41 op/s 2026-04-15T13:51:44.694 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop 2026-04-15T13:51:44.897 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:51:44.897 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (15m) 10m ago 16m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:51:44.897 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (10m) 10m ago 16m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:51:44.897 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (4m) 4m ago 16m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111 2026-04-15T13:51:44.897 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 4m ago 16m - - 
2026-04-15T13:51:45.157 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:51:45.157 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:51:45.157 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:51:45.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:45 vm09 bash[34466]: cluster 2026-04-15T13:51:44.218017+0000 mgr.vm06.qbbldl (mgr.14229) 1019 : cluster [DBG] pgmap v574: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-04-15T13:51:45.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:45 vm09 bash[34466]: audit 2026-04-15T13:51:45.153391+0000 mon.vm06 (mon.0) 1326 : audit [DBG] from='client.? 192.168.123.106:0/995116041' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:51:45.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:45 vm06 bash[28114]: cluster 2026-04-15T13:51:44.218017+0000 mgr.vm06.qbbldl (mgr.14229) 1019 : cluster [DBG] pgmap v574: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-04-15T13:51:45.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:45 vm06 bash[28114]: audit 2026-04-15T13:51:45.153391+0000 mon.vm06 (mon.0) 1326 : audit [DBG] from='client.? 192.168.123.106:0/995116041' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:51:46.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:46 vm09 bash[34466]: audit 2026-04-15T13:51:44.674960+0000 mgr.vm06.qbbldl (mgr.14229) 1020 : audit [DBG] from='client.16862 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:46.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:46 vm09 bash[34466]: audit 2026-04-15T13:51:44.890535+0000 mgr.vm06.qbbldl (mgr.14229) 1021 : audit [DBG] from='client.16866 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:46.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:46 vm06 bash[28114]: audit 2026-04-15T13:51:44.674960+0000 mgr.vm06.qbbldl (mgr.14229) 1020 : audit [DBG] from='client.16862 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:46.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:46 vm06 bash[28114]: audit 2026-04-15T13:51:44.890535+0000 mgr.vm06.qbbldl (mgr.14229) 1021 : audit [DBG] from='client.16866 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:47.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:47 vm09 bash[34466]: cluster 2026-04-15T13:51:46.218498+0000 mgr.vm06.qbbldl (mgr.14229) 1022 : cluster [DBG] pgmap v575: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-04-15T13:51:47.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:47 vm06 bash[28114]: cluster 2026-04-15T13:51:46.218498+0000 mgr.vm06.qbbldl (mgr.14229) 1022 : cluster [DBG] pgmap v575: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-04-15T13:51:49.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:49 vm09 bash[34466]: cluster 2026-04-15T13:51:48.218939+0000 mgr.vm06.qbbldl (mgr.14229) 1023 : cluster [DBG] pgmap v576: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-04-15T13:51:49.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:49 vm06 bash[28114]: cluster 2026-04-15T13:51:48.218939+0000 mgr.vm06.qbbldl (mgr.14229) 1023 : cluster [DBG] pgmap v576: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 36 KiB/s rd, 170 B/s wr, 59 op/s
2026-04-15T13:51:50.377 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to stop
2026-04-15T13:51:50.578 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:51:50.578 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (15m) 10m ago 16m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:51:50.578 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (10m) 10m ago 16m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:51:50.578 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (5m) 4m ago 16m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:51:50.578 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 4m ago 16m - -
2026-04-15T13:51:50.815 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:51:50.815 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:51:50.815 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:51:51.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:51 vm09 bash[34466]: cluster 2026-04-15T13:51:50.219520+0000 mgr.vm06.qbbldl (mgr.14229) 1024 : cluster [DBG] pgmap v577: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 35 KiB/s rd, 170 B/s wr, 58 op/s
2026-04-15T13:51:51.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:51 vm09 bash[34466]: audit 2026-04-15T13:51:50.355756+0000 mgr.vm06.qbbldl (mgr.14229) 1025 : audit [DBG] from='client.16874 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:51.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:51 vm09 bash[34466]: audit 2026-04-15T13:51:50.811568+0000 mon.vm06 (mon.0) 1327 : audit [DBG] from='client.? 192.168.123.106:0/708304631' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:51:51.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:51 vm06 bash[28114]: cluster 2026-04-15T13:51:50.219520+0000 mgr.vm06.qbbldl (mgr.14229) 1024 : cluster [DBG] pgmap v577: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 35 KiB/s rd, 170 B/s wr, 58 op/s
2026-04-15T13:51:51.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:51 vm06 bash[28114]: audit 2026-04-15T13:51:50.355756+0000 mgr.vm06.qbbldl (mgr.14229) 1025 : audit [DBG] from='client.16874 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:51.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:51 vm06 bash[28114]: audit 2026-04-15T13:51:50.811568+0000 mon.vm06 (mon.0) 1327 : audit [DBG] from='client.? 192.168.123.106:0/708304631' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:51:52.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:52 vm06 bash[28114]: audit 2026-04-15T13:51:50.571150+0000 mgr.vm06.qbbldl (mgr.14229) 1026 : audit [DBG] from='client.16878 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:52.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:52 vm09 bash[34466]: audit 2026-04-15T13:51:50.571150+0000 mgr.vm06.qbbldl (mgr.14229) 1026 : audit [DBG] from='client.16878 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:53.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:53 vm06 bash[28114]: cluster 2026-04-15T13:51:52.220026+0000 mgr.vm06.qbbldl (mgr.14229) 1027 : cluster [DBG] pgmap v578: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 27 op/s
2026-04-15T13:51:53.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:53 vm09 bash[34466]: cluster 2026-04-15T13:51:52.220026+0000 mgr.vm06.qbbldl (mgr.14229) 1027 : cluster [DBG] pgmap v578: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 17 KiB/s rd, 0 B/s wr, 27 op/s
2026-04-15T13:51:54.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:54 vm06 bash[28114]: audit 2026-04-15T13:51:53.497049+0000 mon.vm06 (mon.0) 1328 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:51:54.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:54 vm06 bash[28114]: audit 2026-04-15T13:51:53.497049+0000 mon.vm06 (mon.0) 1328 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:51:54.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:54 vm09 bash[34466]: audit 2026-04-15T13:51:53.497049+0000 mon.vm06 (mon.0) 1328 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:51:54.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:54 vm09 bash[34466]: audit 2026-04-15T13:51:53.497049+0000 mon.vm06 (mon.0) 1328 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch 2026-04-15T13:51:54.998 INFO:teuthology.orchestra.run.vm06.stderr: % Total % Received % Xferd Average Speed Time Time Time Current 2026-04-15T13:51:54.999 INFO:teuthology.orchestra.run.vm06.stderr: Dload Upload Total Spent Left Speed 2026-04-15T13:51:54.999 INFO:teuthology.orchestra.run.vm06.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k 2026-04-15T13:51:55.201 INFO:teuthology.orchestra.run.vm06.stdout:anonymousScheduled to start rgw.foo.vm09.pxnsqu on host 'vm09' 2026-04-15T13:51:55.423 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to start 2026-04-15T13:51:55.629 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:51:55.630 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (15m) 10m ago 16m 117M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f 2026-04-15T13:51:55.630 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (10m) 10m ago 16m 92.5M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de 2026-04-15T13:51:55.630 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (5m) 4m ago 16m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111 2026-04-15T13:51:55.630 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 4m ago 16m - - 2026-04-15T13:51:55.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:55 vm06 bash[28114]: cluster 2026-04-15T13:51:54.220417+0000 mgr.vm06.qbbldl (mgr.14229) 1028 : cluster [DBG] pgmap v579: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 341 B/s wr, 18 op/s 2026-04-15T13:51:55.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:55 vm06 bash[28114]: cluster 2026-04-15T13:51:54.220417+0000 mgr.vm06.qbbldl (mgr.14229) 1028 : cluster [DBG] pgmap v579: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 341 B/s wr, 18 op/s 2026-04-15T13:51:55.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:55 vm06 bash[28114]: audit 2026-04-15T13:51:55.192347+0000 mon.vm06 (mon.0) 1329 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:51:55.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:55 vm06 bash[28114]: audit 2026-04-15T13:51:55.192347+0000 mon.vm06 (mon.0) 1329 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:51:55.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:55 vm06 bash[28114]: audit 2026-04-15T13:51:55.197485+0000 mon.vm06 (mon.0) 1330 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:51:55.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:55 vm06 bash[28114]: audit 2026-04-15T13:51:55.197485+0000 mon.vm06 (mon.0) 1330 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:51:55.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:55 vm06 bash[28114]: audit 2026-04-15T13:51:55.198327+0000 mon.vm06 (mon.0) 1331 : audit [DBG] from='mgr.14229 
192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:51:55.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:55 vm06 bash[28114]: audit 2026-04-15T13:51:55.198327+0000 mon.vm06 (mon.0) 1331 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:51:55.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:55 vm09 bash[34466]: cluster 2026-04-15T13:51:54.220417+0000 mgr.vm06.qbbldl (mgr.14229) 1028 : cluster [DBG] pgmap v579: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 341 B/s wr, 18 op/s 2026-04-15T13:51:55.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:55 vm09 bash[34466]: cluster 2026-04-15T13:51:54.220417+0000 mgr.vm06.qbbldl (mgr.14229) 1028 : cluster [DBG] pgmap v579: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 11 KiB/s rd, 341 B/s wr, 18 op/s 2026-04-15T13:51:55.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:55 vm09 bash[34466]: audit 2026-04-15T13:51:55.192347+0000 mon.vm06 (mon.0) 1329 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:51:55.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:55 vm09 bash[34466]: audit 2026-04-15T13:51:55.192347+0000 mon.vm06 (mon.0) 1329 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:51:55.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:55 vm09 bash[34466]: audit 2026-04-15T13:51:55.197485+0000 mon.vm06 (mon.0) 1330 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:51:55.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:55 vm09 bash[34466]: audit 2026-04-15T13:51:55.197485+0000 mon.vm06 (mon.0) 1330 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:51:55.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:55 vm09 bash[34466]: audit 2026-04-15T13:51:55.198327+0000 mon.vm06 (mon.0) 1331 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:51:55.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:55 vm09 bash[34466]: audit 2026-04-15T13:51:55.198327+0000 mon.vm06 (mon.0) 1331 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:51:55.892 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s) 2026-04-15T13:51:55.893 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s) 2026-04-15T13:51:55.893 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state 2026-04-15T13:51:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:56 vm09 bash[34466]: audit 2026-04-15T13:51:55.185751+0000 mgr.vm06.qbbldl (mgr.14229) 1029 : audit [DBG] from='client.16886 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm09.pxnsqu", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:56 vm09 bash[34466]: audit 2026-04-15T13:51:55.185751+0000 mgr.vm06.qbbldl (mgr.14229) 1029 : audit [DBG] from='client.16886 -' 
entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm09.pxnsqu", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:56 vm09 bash[34466]: cephadm 2026-04-15T13:51:55.186115+0000 mgr.vm06.qbbldl (mgr.14229) 1030 : cephadm [INF] Schedule start daemon rgw.foo.vm09.pxnsqu 2026-04-15T13:51:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:56 vm09 bash[34466]: cephadm 2026-04-15T13:51:55.186115+0000 mgr.vm06.qbbldl (mgr.14229) 1030 : cephadm [INF] Schedule start daemon rgw.foo.vm09.pxnsqu 2026-04-15T13:51:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:56 vm09 bash[34466]: audit 2026-04-15T13:51:55.403982+0000 mgr.vm06.qbbldl (mgr.14229) 1031 : audit [DBG] from='client.16890 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:56 vm09 bash[34466]: audit 2026-04-15T13:51:55.403982+0000 mgr.vm06.qbbldl (mgr.14229) 1031 : audit [DBG] from='client.16890 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:56 vm09 bash[34466]: audit 2026-04-15T13:51:55.889233+0000 mon.vm06 (mon.0) 1332 : audit [DBG] from='client.? 192.168.123.106:0/641751504' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:51:56.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:56 vm09 bash[34466]: audit 2026-04-15T13:51:55.889233+0000 mon.vm06 (mon.0) 1332 : audit [DBG] from='client.? 192.168.123.106:0/641751504' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:51:57.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:56 vm06 bash[28114]: audit 2026-04-15T13:51:55.185751+0000 mgr.vm06.qbbldl (mgr.14229) 1029 : audit [DBG] from='client.16886 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm09.pxnsqu", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:57.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:56 vm06 bash[28114]: audit 2026-04-15T13:51:55.185751+0000 mgr.vm06.qbbldl (mgr.14229) 1029 : audit [DBG] from='client.16886 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "rgw.foo.vm09.pxnsqu", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:57.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:56 vm06 bash[28114]: cephadm 2026-04-15T13:51:55.186115+0000 mgr.vm06.qbbldl (mgr.14229) 1030 : cephadm [INF] Schedule start daemon rgw.foo.vm09.pxnsqu 2026-04-15T13:51:57.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:56 vm06 bash[28114]: cephadm 2026-04-15T13:51:55.186115+0000 mgr.vm06.qbbldl (mgr.14229) 1030 : cephadm [INF] Schedule start daemon rgw.foo.vm09.pxnsqu 2026-04-15T13:51:57.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:56 vm06 bash[28114]: audit 2026-04-15T13:51:55.403982+0000 mgr.vm06.qbbldl (mgr.14229) 1031 : audit [DBG] from='client.16890 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:57.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:56 vm06 bash[28114]: audit 2026-04-15T13:51:55.403982+0000 mgr.vm06.qbbldl (mgr.14229) 1031 : audit [DBG] from='client.16890 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:51:57.016 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:56 vm06 bash[28114]: audit 2026-04-15T13:51:55.889233+0000 mon.vm06 (mon.0) 1332 : audit [DBG] from='client.? 192.168.123.106:0/641751504' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:51:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:57 vm09 bash[34466]: audit 2026-04-15T13:51:55.623520+0000 mgr.vm06.qbbldl (mgr.14229) 1032 : audit [DBG] from='client.16894 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:57.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:57 vm09 bash[34466]: cluster 2026-04-15T13:51:56.220978+0000 mgr.vm06.qbbldl (mgr.14229) 1033 : cluster [DBG] pgmap v580: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:51:58.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:57 vm06 bash[28114]: audit 2026-04-15T13:51:55.623520+0000 mgr.vm06.qbbldl (mgr.14229) 1032 : audit [DBG] from='client.16894 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:51:58.016 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:57 vm06 bash[28114]: cluster 2026-04-15T13:51:56.220978+0000 mgr.vm06.qbbldl (mgr.14229) 1033 : cluster [DBG] pgmap v580: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:51:58.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:51:58 vm09 bash[34466]: cluster 2026-04-15T13:51:58.221439+0000 mgr.vm06.qbbldl (mgr.14229) 1034 : cluster [DBG] pgmap v581: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:51:58.889 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:51:58 vm06 bash[28114]: cluster 2026-04-15T13:51:58.221439+0000 mgr.vm06.qbbldl (mgr.14229) 1034 : cluster [DBG] pgmap v581: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:52:01.129 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for rgw.foo.vm09.pxnsqu to start
2026-04-15T13:52:01.340 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:52:01.340 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (15m) 0s ago 16m 135M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:52:01.340 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (10m) 0s ago 16m 128M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:52:01.340 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (5m) 4m ago 16m 94.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:52:01.340 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 error 4m ago 16m - -
2026-04-15T13:52:01.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:01 vm09 bash[34466]: cluster 2026-04-15T13:52:00.221916+0000 mgr.vm06.qbbldl (mgr.14229) 1035 : cluster [DBG] pgmap v582: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:52:01.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:01 vm09 bash[34466]: audit 2026-04-15T13:52:00.614549+0000 mon.vm06 (mon.0) 1333 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:01.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:01 vm09 bash[34466]: audit 2026-04-15T13:52:00.619860+0000 mon.vm06 (mon.0) 1334 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:01.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:01 vm09 bash[34466]: audit 2026-04-15T13:52:00.620746+0000 mon.vm06 (mon.0) 1335 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:52:01.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:01 vm09 bash[34466]: audit 2026-04-15T13:52:00.621285+0000 mon.vm06 (mon.0) 1336 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:52:01.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:01 vm09 bash[34466]: audit 2026-04-15T13:52:00.624724+0000 mon.vm06 (mon.0) 1337 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:01.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:01 vm09 bash[34466]: audit 2026-04-15T13:52:00.626400+0000 mon.vm06 (mon.0) 1338 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:52:01.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:01 vm09 bash[34466]: audit 2026-04-15T13:52:01.268919+0000 mon.vm06 (mon.0) 1339 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:01.643 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_WARN 1 failed cephadm daemon(s)
2026-04-15T13:52:01.643 INFO:teuthology.orchestra.run.vm06.stdout:[WRN] CEPHADM_FAILED_DAEMON: 1 failed cephadm daemon(s)
2026-04-15T13:52:01.643 INFO:teuthology.orchestra.run.vm06.stdout: daemon rgw.foo.vm09.pxnsqu on vm09 is in error state
2026-04-15T13:52:01.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:01 vm06 bash[28114]: cluster 2026-04-15T13:52:00.221916+0000 mgr.vm06.qbbldl (mgr.14229) 1035 : cluster [DBG] pgmap v582: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:52:01.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:01 vm06 bash[28114]: audit 2026-04-15T13:52:00.614549+0000 mon.vm06 (mon.0) 1333 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:01.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:01 vm06 bash[28114]: audit 2026-04-15T13:52:00.619860+0000 mon.vm06 (mon.0) 1334 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:01.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:01 vm06 bash[28114]: audit 2026-04-15T13:52:00.620746+0000 mon.vm06 (mon.0) 1335 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:52:01.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:01 vm06 bash[28114]: audit 2026-04-15T13:52:00.621285+0000 mon.vm06 (mon.0) 1336 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:52:01.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:01 vm06 bash[28114]: audit 2026-04-15T13:52:00.624724+0000 mon.vm06 (mon.0) 1337 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:01.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:01 vm06 bash[28114]: audit 2026-04-15T13:52:00.626400+0000 mon.vm06 (mon.0) 1338 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:52:01.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:01 vm06 bash[28114]: audit 2026-04-15T13:52:01.268919+0000 mon.vm06 (mon.0) 1339 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:02.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:02 vm09 bash[34466]: cluster 2026-04-15T13:52:00.622409+0000 mgr.vm06.qbbldl (mgr.14229) 1036 : cluster [DBG] pgmap v583: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 196 B/s rd, 393 B/s wr, 0 op/s
2026-04-15T13:52:02.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:02 vm09 bash[34466]: audit 2026-04-15T13:52:01.103669+0000 mgr.vm06.qbbldl (mgr.14229) 1037 : audit [DBG] from='client.16902 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:02.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:02 vm09 bash[34466]: audit 2026-04-15T13:52:01.306585+0000 mon.vm06 (mon.0) 1340 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:02.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:02 vm09 bash[34466]: audit 2026-04-15T13:52:01.313596+0000 mon.vm06 (mon.0) 1341 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:52:02.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:02 vm09 bash[34466]: audit 2026-04-15T13:52:01.333740+0000 mgr.vm06.qbbldl (mgr.14229) 1038 : audit [DBG] from='client.16906 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:02.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:02 vm09 bash[34466]: audit 2026-04-15T13:52:01.639638+0000 mon.vm06 (mon.0) 1342 : audit [DBG] from='client.? 192.168.123.106:0/1155990473' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:52:02.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:02 vm06 bash[28114]: cluster 2026-04-15T13:52:00.622409+0000 mgr.vm06.qbbldl (mgr.14229) 1036 : cluster [DBG] pgmap v583: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 196 B/s rd, 393 B/s wr, 0 op/s
2026-04-15T13:52:02.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:02 vm06 bash[28114]: audit 2026-04-15T13:52:01.103669+0000 mgr.vm06.qbbldl (mgr.14229) 1037 : audit [DBG] from='client.16902 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:02.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:02 vm06 bash[28114]: audit 2026-04-15T13:52:01.306585+0000 mon.vm06 (mon.0) 1340 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:02.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:02 vm06 bash[28114]: audit 2026-04-15T13:52:01.313596+0000 mon.vm06 (mon.0) 1341 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:52:02.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:02 vm06 bash[28114]: audit 2026-04-15T13:52:01.333740+0000 mgr.vm06.qbbldl (mgr.14229) 1038 : audit [DBG] from='client.16906 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "rgw", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:02.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:02 vm06 bash[28114]: audit 2026-04-15T13:52:01.639638+0000 mon.vm06 (mon.0) 1342 : audit [DBG] from='client.? 192.168.123.106:0/1155990473' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:52:04.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:04 vm09 bash[34466]: cluster 2026-04-15T13:52:02.622787+0000 mgr.vm06.qbbldl (mgr.14229) 1039 : cluster [DBG] pgmap v584: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 3.1 KiB/s rd, 393 B/s wr, 5 op/s
2026-04-15T13:52:04.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:04 vm06 bash[28114]: cluster 2026-04-15T13:52:02.622787+0000 mgr.vm06.qbbldl (mgr.14229) 1039 : cluster [DBG] pgmap v584: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 3.1 KiB/s rd, 393 B/s wr, 5 op/s
2026-04-15T13:52:06.516 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:06 vm06 bash[28114]: cluster 2026-04-15T13:52:04.623253+0000 mgr.vm06.qbbldl (mgr.14229) 1040 : cluster [DBG] pgmap v585: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 39 KiB/s rd, 0 B/s wr, 64 op/s
2026-04-15T13:52:06.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:06 vm09 bash[34466]: cluster 2026-04-15T13:52:04.623253+0000 mgr.vm06.qbbldl (mgr.14229) 1040 : cluster [DBG] pgmap v585: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 39 KiB/s rd, 0 B/s wr, 64 op/s
2026-04-15T13:52:06.850 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (5s) 0s ago 16m 92.4M - 20.2.0-19-g7ec4401a095 b4cb326006c0 d38ed17e823f
2026-04-15T13:52:06.851 INFO:teuthology.orchestra.run.vm06.stdout:Check with each haproxy down in turn...
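The phase marker above begins the ingress failover check. Judging by the "Scheduled to stop ..." / "Waiting for ... to stop" lines that follow, the test enumerates the haproxy daemons from the orchestrator and bounces each one in turn. A rough sketch of that enumeration, under the assumption that daemon names come from the first column of `ceph orch ps` output (the verification between stop and start is elided here):

  # list the ingress haproxy daemons by name and cycle each one in turn
  for haproxy in $(ceph orch ps --daemon-type haproxy | awk 'NR>1 {print $1}'); do
      ceph orch daemon stop "$haproxy"
      # ...wait for 'stopped', confirm the VIP still answers, then...
      ceph orch daemon start "$haproxy"
  done
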
2026-04-15T13:52:07.249 INFO:teuthology.orchestra.run.vm06.stdout:Scheduled to stop haproxy.rgw.foo.vm06.ndmjsv on host 'vm06'
2026-04-15T13:52:07.474 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for haproxy.rgw.foo.vm06.ndmjsv to stop
2026-04-15T13:52:07.673 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:52:07.673 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.rgw.foo.vm06.ndmjsv vm06 *:9000,9001 running (16m) 7s ago 16m 4219k - 2.3.17-d1c9119 5479ac79e01f 54e16c005c3e
2026-04-15T13:52:07.673 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.rgw.foo.vm09.xswxmk vm09 *:9000,9001 running (16m) 1s ago 16m 4188k - 2.3.17-d1c9119 5479ac79e01f 4bef945ada04
2026-04-15T13:52:07.698 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:07 vm06 bash[28114]: audit 2026-04-15T13:52:06.421683+0000 mon.vm06 (mon.0) 1343 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:07.698 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:07 vm06 bash[28114]: audit 2026-04-15T13:52:06.426857+0000 mon.vm06 (mon.0) 1344 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:07.699 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:07 vm06 bash[28114]: audit 2026-04-15T13:52:06.427467+0000 mon.vm06 (mon.0) 1345 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:52:07.699 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:07 vm06 bash[28114]: audit 2026-04-15T13:52:06.427922+0000 mon.vm06 (mon.0) 1346 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:52:07.699 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:07 vm06 bash[28114]: cluster 2026-04-15T13:52:06.428848+0000 mgr.vm06.qbbldl (mgr.14229) 1041 : cluster [DBG] pgmap v586: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 69 KiB/s rd, 0 B/s wr, 112 op/s
2026-04-15T13:52:07.699 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:07 vm06 bash[28114]: audit 2026-04-15T13:52:06.431060+0000 mon.vm06 (mon.0) 1347 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:07.699 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:07 vm06 bash[28114]: audit 2026-04-15T13:52:06.432372+0000 mon.vm06 (mon.0) 1348 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:52:07.699 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:07 vm06 bash[28114]: audit 2026-04-15T13:52:07.238322+0000 mon.vm06 (mon.0) 1349 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:07.699 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:07 vm06 bash[28114]: audit 2026-04-15T13:52:07.244552+0000 mon.vm06 (mon.0) 1350 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:07.699 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:07 vm06 bash[28114]: audit 2026-04-15T13:52:07.245432+0000 mon.vm06 (mon.0) 1351 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:52:07.699 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:07 vm06 bash[28114]: audit 2026-04-15T13:52:07.246608+0000 mon.vm06 (mon.0) 1352 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:52:07.699 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:07 vm06 bash[28114]: audit 2026-04-15T13:52:07.247012+0000 mon.vm06 (mon.0) 1353 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:52:07.699 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:07 vm06 bash[28114]: audit 2026-04-15T13:52:07.250658+0000 mon.vm06 (mon.0) 1354 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:07.699 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:07 vm06 bash[28114]: audit 2026-04-15T13:52:07.251948+0000 mon.vm06 (mon.0) 1355 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:52:07.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:07 vm09 bash[34466]: audit 2026-04-15T13:52:06.421683+0000 mon.vm06 (mon.0) 1343 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:07.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:07 vm09 bash[34466]: audit 2026-04-15T13:52:06.426857+0000 mon.vm06 (mon.0) 1344 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:07.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:07 vm09 bash[34466]: audit 2026-04-15T13:52:06.427467+0000 mon.vm06 (mon.0) 1345 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:52:07.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:07 vm09 bash[34466]: audit 2026-04-15T13:52:06.427922+0000 mon.vm06 (mon.0) 1346 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:52:07.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:07 vm09 bash[34466]: cluster 2026-04-15T13:52:06.428848+0000 mgr.vm06.qbbldl (mgr.14229) 1041 : cluster [DBG] pgmap v586: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 69 KiB/s rd, 0 B/s wr, 112 op/s
2026-04-15T13:52:07.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:07 vm09 bash[34466]: audit 2026-04-15T13:52:06.431060+0000 mon.vm06 (mon.0) 1347 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:07.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:07 vm09 bash[34466]: audit 2026-04-15T13:52:06.432372+0000 mon.vm06 (mon.0) 1348 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:52:07.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:07 vm09 bash[34466]: audit 2026-04-15T13:52:07.238322+0000 mon.vm06 (mon.0) 1349 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:07.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:07 vm09 bash[34466]: audit 2026-04-15T13:52:07.244552+0000 mon.vm06 (mon.0) 1350 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:07.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:07 vm09 bash[34466]: audit 2026-04-15T13:52:07.245432+0000 mon.vm06 (mon.0) 1351 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:52:07.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:07 vm09 bash[34466]: audit 2026-04-15T13:52:07.246608+0000 mon.vm06 (mon.0) 1352 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:52:07.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:07 vm09 bash[34466]: audit 2026-04-15T13:52:07.247012+0000 mon.vm06 (mon.0) 1353 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:52:07.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:07 vm09 bash[34466]: audit 2026-04-15T13:52:07.250658+0000 mon.vm06 (mon.0) 1354 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:07.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:07 vm09 bash[34466]: audit 2026-04-15T13:52:07.251948+0000 mon.vm06 (mon.0) 1355 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:52:07.928 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK
2026-04-15T13:52:08.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:08 vm06 bash[28114]: audit 2026-04-15T13:52:06.830076+0000 mgr.vm06.qbbldl (mgr.14229) 1042 : audit [DBG] from='client.16920 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:08.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:08 vm06 bash[28114]: audit 2026-04-15T13:52:07.026459+0000 mgr.vm06.qbbldl (mgr.14229) 1043 : audit [DBG] from='client.16924 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:08.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:08 vm06 bash[28114]: audit 2026-04-15T13:52:07.230742+0000 mgr.vm06.qbbldl (mgr.14229) 1044 : audit [DBG] from='client.16928 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.rgw.foo.vm06.ndmjsv", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:08.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:08 vm06 bash[28114]: cephadm 2026-04-15T13:52:07.231195+0000 mgr.vm06.qbbldl (mgr.14229) 1045 : cephadm [INF] Schedule stop daemon haproxy.rgw.foo.vm06.ndmjsv
2026-04-15T13:52:08.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:08 vm06 bash[28114]: cluster 2026-04-15T13:52:07.427954+0000 mon.vm06 (mon.0) 1356 : cluster [INF] Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-04-15T13:52:08.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:08 vm06 bash[28114]: cluster 2026-04-15T13:52:07.427972+0000 mon.vm06 (mon.0) 1357 : cluster [INF] Cluster is now healthy
2026-04-15T13:52:08.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:08 vm06 bash[28114]: audit 2026-04-15T13:52:07.450811+0000 mgr.vm06.qbbldl (mgr.14229) 1046 : audit [DBG] from='client.16932 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:08.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:08 vm06 bash[28114]: audit 2026-04-15T13:52:07.720961+0000 mon.vm06 (mon.0) 1358 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:08.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:08 vm06 bash[28114]: audit 2026-04-15T13:52:07.726069+0000 mon.vm06 (mon.0) 1359 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:08.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:08 vm06 bash[28114]: audit 2026-04-15T13:52:07.727510+0000 mon.vm06 (mon.0) 1360 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:52:08.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:08 vm06 bash[28114]: audit 2026-04-15T13:52:07.924885+0000 mon.vm06 (mon.0) 1361 : audit [DBG] from='client.? 192.168.123.106:0/1271799741' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:52:08.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:08 vm09 bash[34466]: audit 2026-04-15T13:52:06.830076+0000 mgr.vm06.qbbldl (mgr.14229) 1042 : audit [DBG] from='client.16920 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:08.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:08 vm09 bash[34466]: audit 2026-04-15T13:52:07.026459+0000 mgr.vm06.qbbldl (mgr.14229) 1043 : audit [DBG] from='client.16924 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:08.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:08 vm09 bash[34466]: audit 2026-04-15T13:52:07.230742+0000 mgr.vm06.qbbldl (mgr.14229) 1044 : audit [DBG] from='client.16928 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.rgw.foo.vm06.ndmjsv", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:08.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:08 vm09 bash[34466]: cephadm 2026-04-15T13:52:07.231195+0000 mgr.vm06.qbbldl (mgr.14229) 1045 : cephadm [INF] Schedule stop daemon haproxy.rgw.foo.vm06.ndmjsv
2026-04-15T13:52:08.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:08 vm09 bash[34466]: cluster 2026-04-15T13:52:07.427954+0000 mon.vm06 (mon.0) 1356 : cluster [INF] Health check cleared: CEPHADM_FAILED_DAEMON (was: 1 failed cephadm daemon(s))
2026-04-15T13:52:08.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:08 vm09 bash[34466]: cluster 2026-04-15T13:52:07.427972+0000 mon.vm06 (mon.0) 1357 : cluster [INF] Cluster is now healthy
2026-04-15T13:52:08.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:08 vm09 bash[34466]: audit 2026-04-15T13:52:07.450811+0000 mgr.vm06.qbbldl (mgr.14229) 1046 : audit [DBG] from='client.16932 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:08.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:08 vm09 bash[34466]: audit 2026-04-15T13:52:07.720961+0000 mon.vm06 (mon.0) 1358 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:08.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:08 vm09 bash[34466]: audit 2026-04-15T13:52:07.726069+0000 mon.vm06 (mon.0) 1359 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:08.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:08 vm09 bash[34466]: audit 2026-04-15T13:52:07.727510+0000 mon.vm06 (mon.0) 1360 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:52:08.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:08 vm09 bash[34466]: audit 2026-04-15T13:52:07.924885+0000 mon.vm06 (mon.0) 1361 : audit [DBG] from='client.? 192.168.123.106:0/1271799741' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:52:09.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:09 vm06 bash[28114]: audit 2026-04-15T13:52:07.665743+0000 mgr.vm06.qbbldl (mgr.14229) 1047 : audit [DBG] from='client.16936 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:09.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:09 vm06 bash[28114]: cluster 2026-04-15T13:52:08.431172+0000 mgr.vm06.qbbldl (mgr.14229) 1048 : cluster [DBG] pgmap v587: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 78 KiB/s rd, 0 B/s wr, 127 op/s
2026-04-15T13:52:09.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:09 vm06 bash[28114]: audit 2026-04-15T13:52:08.500684+0000 mon.vm06 (mon.0) 1362 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:09.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:09 vm06 bash[28114]: audit 2026-04-15T13:52:08.501240+0000 mon.vm06 (mon.0) 1363 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:52:09.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:09 vm09 bash[34466]: audit 2026-04-15T13:52:07.665743+0000 mgr.vm06.qbbldl (mgr.14229) 1047 : audit [DBG] from='client.16936 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:09.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:09 vm09 bash[34466]: cluster 2026-04-15T13:52:08.431172+0000 mgr.vm06.qbbldl (mgr.14229) 1048 : cluster [DBG] pgmap v587: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 78 KiB/s rd, 0 B/s wr, 127 op/s
2026-04-15T13:52:09.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:09 vm09 bash[34466]: audit 2026-04-15T13:52:08.500684+0000 mon.vm06 (mon.0) 1362 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:09.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:09 vm09 bash[34466]: audit 2026-04-15T13:52:08.501240+0000 mon.vm06 (mon.0) 1363 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:52:11.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:11 vm06 bash[28114]: cluster 2026-04-15T13:52:10.431689+0000 mgr.vm06.qbbldl (mgr.14229) 1049 : cluster [DBG] pgmap v588: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 78 KiB/s rd, 200 B/s wr, 128 op/s
2026-04-15T13:52:11.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:11 vm09 bash[34466]: cluster 2026-04-15T13:52:10.431689+0000 mgr.vm06.qbbldl (mgr.14229) 1049 : cluster [DBG] pgmap v588: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 78 KiB/s rd, 200 B/s wr, 128 op/s
2026-04-15T13:52:13.139 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.rgw.foo.vm06.ndmjsv vm06 *:9000,9001 stopped 0s ago 16m - -
2026-04-15T13:52:13.145 INFO:teuthology.orchestra.run.vm06.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-15T13:52:13.148 INFO:teuthology.orchestra.run.vm06.stderr: Dload Upload Total Spent Left Speed
2026-04-15T13:52:13.153 INFO:teuthology.orchestra.run.vm06.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
2026-04-15T13:52:13.153 INFO:teuthology.orchestra.run.vm06.stderr:curl: (7) Failed to connect to 12.12.1.106 port 9000: Connection refused
2026-04-15T13:52:13.153 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for http://12.12.1.106:9000/ to be available
2026-04-15T13:52:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:13 vm06 bash[28114]: cluster 2026-04-15T13:52:12.432102+0000 mgr.vm06.qbbldl (mgr.14229) 1050 : cluster [DBG] pgmap v589: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 68 KiB/s rd, 173 B/s wr, 110 op/s
2026-04-15T13:52:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:13 vm06 bash[28114]: audit 2026-04-15T13:52:12.973409+0000 mon.vm06 (mon.0) 1364 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:13 vm06 bash[28114]: audit 2026-04-15T13:52:12.979157+0000 mon.vm06 (mon.0) 1365 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:13 vm06 bash[28114]: audit 2026-04-15T13:52:13.315272+0000 mon.vm06 (mon.0) 1366 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:52:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:13 vm06 bash[28114]: audit 2026-04-15T13:52:13.315824+0000 mon.vm06 (mon.0) 1367 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:52:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:13 vm06 bash[28114]: audit 2026-04-15T13:52:13.320475+0000 mon.vm06 (mon.0) 1368 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
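The curl stanzas above show the availability probe against the ingress VIP: exit code 7 (connection refused) immediately after the haproxy on vm06 stops, while its peer has not yet taken over the virtual IP. The retry is a plain curl-until-success loop; a minimal sketch, with the VIP and port taken from the log:

  # probe the ingress VIP once per second until it answers; abort after 300s
  timeout 300 bash -c "while ! curl http://12.12.1.106:9000/; do
      echo 'Waiting for http://12.12.1.106:9000/ to be available'
      sleep 1
  done"
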
2026-04-15T13:52:13.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:13 vm06 bash[28114]: audit 2026-04-15T13:52:13.322566+0000 mon.vm06 (mon.0) 1369 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:52:13.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:13 vm09 bash[34466]: cluster 2026-04-15T13:52:12.432102+0000 mgr.vm06.qbbldl (mgr.14229) 1050 : cluster [DBG] pgmap v589: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 68 KiB/s rd, 173 B/s wr, 110 op/s
2026-04-15T13:52:13.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:13 vm09 bash[34466]: audit 2026-04-15T13:52:12.973409+0000 mon.vm06 (mon.0) 1364 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:13.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:13 vm09 bash[34466]: audit 2026-04-15T13:52:12.979157+0000 mon.vm06 (mon.0) 1365 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:13.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:13 vm09 bash[34466]: audit 2026-04-15T13:52:13.315272+0000 mon.vm06 (mon.0) 1366 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:52:13.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:13 vm09 bash[34466]: audit 2026-04-15T13:52:13.315824+0000 mon.vm06 (mon.0) 1367 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:52:13.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:13 vm09 bash[34466]: audit 2026-04-15T13:52:13.320475+0000 mon.vm06 (mon.0) 1368 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:13.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:13 vm09 bash[34466]: audit 2026-04-15T13:52:13.322566+0000 mon.vm06 (mon.0) 1369 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:52:14.157 INFO:teuthology.orchestra.run.vm06.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-15T13:52:14.157 INFO:teuthology.orchestra.run.vm06.stderr: Dload Upload Total Spent Left Speed
2026-04-15T13:52:14.157 INFO:teuthology.orchestra.run.vm06.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
2026-04-15T13:52:14.157 INFO:teuthology.orchestra.run.vm06.stderr:curl: (7) Failed to connect to 12.12.1.106 port 9000: Connection refused
2026-04-15T13:52:14.157 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for http://12.12.1.106:9000/ to be available
2026-04-15T13:52:14.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:14 vm06 bash[28114]: audit 2026-04-15T13:52:13.118900+0000 mgr.vm06.qbbldl (mgr.14229) 1051 : audit [DBG] from='client.16944 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:14.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:14 vm09 bash[34466]: audit 2026-04-15T13:52:13.118900+0000 mgr.vm06.qbbldl (mgr.14229) 1051 : audit [DBG] from='client.16944 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:15.162 INFO:teuthology.orchestra.run.vm06.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-15T13:52:15.162 INFO:teuthology.orchestra.run.vm06.stderr: Dload Upload Total Spent Left Speed
2026-04-15T13:52:15.162 INFO:teuthology.orchestra.run.vm06.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k
2026-04-15T13:52:15.364 INFO:teuthology.orchestra.run.vm06.stdout:anonymousScheduled to start haproxy.rgw.foo.vm06.ndmjsv on host 'vm06'
2026-04-15T13:52:15.575 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for haproxy.rgw.foo.vm06.ndmjsv to start
2026-04-15T13:52:15.757 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM
USE MEM LIM VERSION IMAGE ID CONTAINER ID 2026-04-15T13:52:15.757 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.rgw.foo.vm06.ndmjsv vm06 *:9000,9001 stopped 2s ago 16m - - 2026-04-15T13:52:15.757 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.rgw.foo.vm09.xswxmk vm09 *:9000,9001 running (16m) 9s ago 16m 4188k - 2.3.17-d1c9119 5479ac79e01f 4bef945ada04 2026-04-15T13:52:15.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:15 vm06 bash[28114]: cluster 2026-04-15T13:52:14.432483+0000 mgr.vm06.qbbldl (mgr.14229) 1052 : cluster [DBG] pgmap v590: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 65 KiB/s rd, 173 B/s wr, 106 op/s 2026-04-15T13:52:15.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:15 vm06 bash[28114]: cluster 2026-04-15T13:52:14.432483+0000 mgr.vm06.qbbldl (mgr.14229) 1052 : cluster [DBG] pgmap v590: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 65 KiB/s rd, 173 B/s wr, 106 op/s 2026-04-15T13:52:15.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:15 vm06 bash[28114]: audit 2026-04-15T13:52:15.354149+0000 mon.vm06 (mon.0) 1370 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:15.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:15 vm06 bash[28114]: audit 2026-04-15T13:52:15.354149+0000 mon.vm06 (mon.0) 1370 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:15.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:15 vm06 bash[28114]: audit 2026-04-15T13:52:15.359848+0000 mon.vm06 (mon.0) 1371 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:15.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:15 vm06 bash[28114]: audit 2026-04-15T13:52:15.359848+0000 mon.vm06 (mon.0) 1371 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:15.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:15 vm06 bash[28114]: audit 2026-04-15T13:52:15.360805+0000 mon.vm06 (mon.0) 1372 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:52:15.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:15 vm06 bash[28114]: audit 2026-04-15T13:52:15.360805+0000 mon.vm06 (mon.0) 1372 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:52:15.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:15 vm09 bash[34466]: cluster 2026-04-15T13:52:14.432483+0000 mgr.vm06.qbbldl (mgr.14229) 1052 : cluster [DBG] pgmap v590: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 65 KiB/s rd, 173 B/s wr, 106 op/s 2026-04-15T13:52:15.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:15 vm09 bash[34466]: cluster 2026-04-15T13:52:14.432483+0000 mgr.vm06.qbbldl (mgr.14229) 1052 : cluster [DBG] pgmap v590: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 65 KiB/s rd, 173 B/s wr, 106 op/s 2026-04-15T13:52:15.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:15 vm09 bash[34466]: audit 2026-04-15T13:52:15.354149+0000 mon.vm06 (mon.0) 1370 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:15.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:15 vm09 bash[34466]: 
audit 2026-04-15T13:52:15.354149+0000 mon.vm06 (mon.0) 1370 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:15.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:15 vm09 bash[34466]: audit 2026-04-15T13:52:15.359848+0000 mon.vm06 (mon.0) 1371 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:15.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:15 vm09 bash[34466]: audit 2026-04-15T13:52:15.359848+0000 mon.vm06 (mon.0) 1371 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:15.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:15 vm09 bash[34466]: audit 2026-04-15T13:52:15.360805+0000 mon.vm06 (mon.0) 1372 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:52:15.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:15 vm09 bash[34466]: audit 2026-04-15T13:52:15.360805+0000 mon.vm06 (mon.0) 1372 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:52:15.976 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK 2026-04-15T13:52:16.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:16 vm09 bash[34466]: audit 2026-04-15T13:52:15.347572+0000 mgr.vm06.qbbldl (mgr.14229) 1053 : audit [DBG] from='client.16948 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm06.ndmjsv", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:16.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:16 vm09 bash[34466]: audit 2026-04-15T13:52:15.347572+0000 mgr.vm06.qbbldl (mgr.14229) 1053 : audit [DBG] from='client.16948 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm06.ndmjsv", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:16.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:16 vm09 bash[34466]: cephadm 2026-04-15T13:52:15.347938+0000 mgr.vm06.qbbldl (mgr.14229) 1054 : cephadm [INF] Schedule start daemon haproxy.rgw.foo.vm06.ndmjsv 2026-04-15T13:52:16.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:16 vm09 bash[34466]: cephadm 2026-04-15T13:52:15.347938+0000 mgr.vm06.qbbldl (mgr.14229) 1054 : cephadm [INF] Schedule start daemon haproxy.rgw.foo.vm06.ndmjsv 2026-04-15T13:52:16.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:16 vm09 bash[34466]: audit 2026-04-15T13:52:15.689667+0000 mon.vm06 (mon.0) 1373 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:52:16.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:16 vm09 bash[34466]: audit 2026-04-15T13:52:15.689667+0000 mon.vm06 (mon.0) 1373 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:52:16.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:16 vm09 bash[34466]: audit 2026-04-15T13:52:15.690248+0000 mon.vm06 (mon.0) 1374 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:52:16.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:16 vm09 bash[34466]: audit 
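The curl exchange above is the harness probing the ingress virtual IP while one haproxy is down: curl exits 7 (connection refused) until the surviving daemon answers on port 9000, after which a 187-byte response comes back. A minimal stand-alone sketch of the same availability probe, assuming only a curl new enough to support --retry-connrefused (the VIP value is this run's; the flag choices are illustrative, not the harness's own loop):

    VIP=12.12.1.106
    # Poll the ingress endpoint once per second for up to 300 s.
    # curl keeps exiting 7 while the port is closed and exits 0 once haproxy answers.
    curl --silent --fail --retry 300 --retry-delay 1 --retry-connrefused "http://${VIP}:9000/"
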
2026-04-15T13:52:16.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:16 vm09 bash[34466]: audit 2026-04-15T13:52:15.694782+0000 mon.vm06 (mon.0) 1375 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:16.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:16 vm09 bash[34466]: audit 2026-04-15T13:52:15.696023+0000 mon.vm06 (mon.0) 1376 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:52:16.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:16 vm09 bash[34466]: audit 2026-04-15T13:52:15.973505+0000 mon.vm06 (mon.0) 1377 : audit [DBG] from='client.? 192.168.123.106:0/1969424708' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:52:16.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:16 vm09 bash[34466]: audit 2026-04-15T13:52:16.264390+0000 mon.vm06 (mon.0) 1378 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:16.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:16 vm09 bash[34466]: audit 2026-04-15T13:52:16.268991+0000 mon.vm06 (mon.0) 1379 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:16.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:16 vm09 bash[34466]: audit 2026-04-15T13:52:16.270959+0000 mon.vm06 (mon.0) 1380 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:52:16.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:16 vm06 bash[28114]: audit 2026-04-15T13:52:15.347572+0000 mgr.vm06.qbbldl (mgr.14229) 1053 : audit [DBG] from='client.16948 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm06.ndmjsv", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:16.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:16 vm06 bash[28114]: cephadm 2026-04-15T13:52:15.347938+0000 mgr.vm06.qbbldl (mgr.14229) 1054 : cephadm [INF] Schedule start daemon haproxy.rgw.foo.vm06.ndmjsv
2026-04-15T13:52:16.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:16 vm06 bash[28114]: audit 2026-04-15T13:52:15.689667+0000 mon.vm06 (mon.0) 1373 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:52:16.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:16 vm06 bash[28114]: audit 2026-04-15T13:52:15.690248+0000 mon.vm06 (mon.0) 1374 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:52:16.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:16 vm06 bash[28114]: audit 2026-04-15T13:52:15.694782+0000 mon.vm06 (mon.0) 1375 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:16.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:16 vm06 bash[28114]: audit 2026-04-15T13:52:15.696023+0000 mon.vm06 (mon.0) 1376 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:52:16.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:16 vm06 bash[28114]: audit 2026-04-15T13:52:15.973505+0000 mon.vm06 (mon.0) 1377 : audit [DBG] from='client.? 192.168.123.106:0/1969424708' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:52:16.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:16 vm06 bash[28114]: audit 2026-04-15T13:52:16.264390+0000 mon.vm06 (mon.0) 1378 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:16.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:16 vm06 bash[28114]: audit 2026-04-15T13:52:16.268991+0000 mon.vm06 (mon.0) 1379 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:16.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:16 vm06 bash[28114]: audit 2026-04-15T13:52:16.270959+0000 mon.vm06 (mon.0) 1380 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:52:17.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:17 vm06 bash[28114]: audit 2026-04-15T13:52:15.554341+0000 mgr.vm06.qbbldl (mgr.14229) 1055 : audit [DBG] from='client.16952 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:17.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:17 vm06 bash[28114]: audit 2026-04-15T13:52:15.751828+0000 mgr.vm06.qbbldl (mgr.14229) 1056 : audit [DBG] from='client.16956 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:17.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:17 vm06 bash[28114]: cluster 2026-04-15T13:52:16.432998+0000 mgr.vm06.qbbldl (mgr.14229) 1057 : cluster [DBG] pgmap v591: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 33 KiB/s rd, 173 B/s wr, 54 op/s
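Each "Waiting for ... to start" line in this stretch is one pass of the harness polling ceph orch ps, which is what the repeated orch ps and orch ps daemon_type=haproxy audit records correspond to. The same wait can be phrased against the orchestrator's JSON output instead of grepping the table; a sketch, assuming jq is installed and that the JSON field names (daemon_type, daemon_id, status_desc) match this Ceph build:

    name=haproxy.rgw.foo.vm06.ndmjsv   # daemon from this run, for illustration
    # Block until the daemon's status description reads "running".
    until ceph orch ps --daemon-type haproxy --format json |
          jq -e --arg n "$name" 'any(.[]; .daemon_type + "." + .daemon_id == $n and .status_desc == "running")' >/dev/null; do
      sleep 5
    done
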
2026-04-15T13:52:17.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:17 vm09 bash[34466]: audit 2026-04-15T13:52:15.554341+0000 mgr.vm06.qbbldl (mgr.14229) 1055 : audit [DBG] from='client.16952 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:17.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:17 vm09 bash[34466]: audit 2026-04-15T13:52:15.751828+0000 mgr.vm06.qbbldl (mgr.14229) 1056 : audit [DBG] from='client.16956 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:17.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:17 vm09 bash[34466]: cluster 2026-04-15T13:52:16.432998+0000 mgr.vm06.qbbldl (mgr.14229) 1057 : cluster [DBG] pgmap v591: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 33 KiB/s rd, 173 B/s wr, 54 op/s
2026-04-15T13:52:19.608 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:19 vm06 bash[28114]: cluster 2026-04-15T13:52:18.433393+0000 mgr.vm06.qbbldl (mgr.14229) 1058 : cluster [DBG] pgmap v592: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.1 KiB/s rd, 170 B/s wr, 13 op/s
2026-04-15T13:52:19.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:19 vm09 bash[34466]: cluster 2026-04-15T13:52:18.433393+0000 mgr.vm06.qbbldl (mgr.14229) 1058 : cluster [DBG] pgmap v592: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 8.1 KiB/s rd, 170 B/s wr, 13 op/s
2026-04-15T13:52:21.231 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for haproxy.rgw.foo.vm06.ndmjsv to start
2026-04-15T13:52:21.436 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:52:21.436 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.rgw.foo.vm06.ndmjsv vm06 *:9000,9001 stopped 8s ago 16m - -
2026-04-15T13:52:21.436 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.rgw.foo.vm09.xswxmk vm09 *:9000,9001 running (16m) 15s ago 16m 4188k - 2.3.17-d1c9119 5479ac79e01f 4bef945ada04
2026-04-15T13:52:21.694 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK
2026-04-15T13:52:21.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:21 vm06 bash[28114]: cluster 2026-04-15T13:52:20.433798+0000 mgr.vm06.qbbldl (mgr.14229) 1059 : cluster [DBG] pgmap v593: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:52:21.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:21 vm09 bash[34466]: cluster 2026-04-15T13:52:20.433798+0000 mgr.vm06.qbbldl (mgr.14229) 1059 : cluster [DBG] pgmap v593: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:52:22.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:22 vm06 bash[28114]: audit 2026-04-15T13:52:21.205456+0000 mgr.vm06.qbbldl (mgr.14229) 1060 : audit [DBG] from='client.16964 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:22.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:22 vm06 bash[28114]: audit 2026-04-15T13:52:21.430022+0000 mgr.vm06.qbbldl (mgr.14229) 1061 : audit [DBG] from='client.16968 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:22.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:22 vm06 bash[28114]: audit 2026-04-15T13:52:21.654183+0000 mon.vm06 (mon.0) 1381 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:22.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:22 vm06 bash[28114]: audit 2026-04-15T13:52:21.659433+0000 mon.vm06 (mon.0) 1382 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:22.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:22 vm06 bash[28114]: audit 2026-04-15T13:52:21.660511+0000 mon.vm06 (mon.0) 1383 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:52:22.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:22 vm06 bash[28114]: audit 2026-04-15T13:52:21.661197+0000 mon.vm06 (mon.0) 1384 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:52:22.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:22 vm06 bash[28114]: audit 2026-04-15T13:52:21.664600+0000 mon.vm06 (mon.0) 1385 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:22.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:22 vm06 bash[28114]: audit 2026-04-15T13:52:21.666020+0000 mon.vm06 (mon.0) 1386 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:52:22.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:22 vm06 bash[28114]: audit 2026-04-15T13:52:21.690804+0000 mon.vm06 (mon.0) 1387 : audit [DBG] from='client.? 192.168.123.106:0/761877020' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:52:22.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:22 vm09 bash[34466]: audit 2026-04-15T13:52:21.205456+0000 mgr.vm06.qbbldl (mgr.14229) 1060 : audit [DBG] from='client.16964 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:22.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:22 vm09 bash[34466]: audit 2026-04-15T13:52:21.430022+0000 mgr.vm06.qbbldl (mgr.14229) 1061 : audit [DBG] from='client.16968 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:22.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:22 vm09 bash[34466]: audit 2026-04-15T13:52:21.654183+0000 mon.vm06 (mon.0) 1381 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:22.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:22 vm09 bash[34466]: audit 2026-04-15T13:52:21.659433+0000 mon.vm06 (mon.0) 1382 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:22.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:22 vm09 bash[34466]: audit 2026-04-15T13:52:21.660511+0000 mon.vm06 (mon.0) 1383 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:52:22.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:22 vm09 bash[34466]: audit 2026-04-15T13:52:21.661197+0000 mon.vm06 (mon.0) 1384 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
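The audit and cluster records above appear in the journals of both mon hosts because every mon tails the same cluster log channels; the same stream can also be pulled on demand rather than scraped from journalctl. A small sketch, assuming admin keyring access on a bootstrapped host (count and level are arbitrary):

    # Most recent audit-channel entries: the mgr/mon command dispatches seen above.
    ceph log last 20 debug audit
    # Cluster channel: the periodic pgmap summaries.
    ceph log last 20 debug cluster
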
2026-04-15T13:52:22.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:22 vm09 bash[34466]: audit 2026-04-15T13:52:21.664600+0000 mon.vm06 (mon.0) 1385 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:22.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:22 vm09 bash[34466]: audit 2026-04-15T13:52:21.666020+0000 mon.vm06 (mon.0) 1386 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:52:22.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:22 vm09 bash[34466]: audit 2026-04-15T13:52:21.690804+0000 mon.vm06 (mon.0) 1387 : audit [DBG] from='client.? 192.168.123.106:0/761877020' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:52:23.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:23 vm06 bash[28114]: cluster 2026-04-15T13:52:22.434140+0000 mgr.vm06.qbbldl (mgr.14229) 1062 : cluster [DBG] pgmap v594: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:52:23.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:23 vm09 bash[34466]: cluster 2026-04-15T13:52:22.434140+0000 mgr.vm06.qbbldl (mgr.14229) 1062 : cluster [DBG] pgmap v594: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail
2026-04-15T13:52:24.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:24 vm06 bash[28114]: audit 2026-04-15T13:52:23.497355+0000 mon.vm06 (mon.0) 1388 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:52:24.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:24 vm09 bash[34466]: audit 2026-04-15T13:52:23.497355+0000 mon.vm06 (mon.0) 1388 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:52:25.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:25 vm06 bash[28114]: cluster 2026-04-15T13:52:24.434588+0000 mgr.vm06.qbbldl (mgr.14229) 1063 : cluster [DBG] pgmap v595: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:52:25.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:25 vm09 bash[34466]: cluster 2026-04-15T13:52:24.434588+0000 mgr.vm06.qbbldl (mgr.14229) 1063 : cluster [DBG] pgmap v595: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:52:26.910 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.rgw.foo.vm06.ndmjsv vm06 *:9000,9001 running (10s) 5s ago 16m 3972k - 2.3.17-d1c9119 5479ac79e01f cc56a79384f5
2026-04-15T13:52:27.115 INFO:teuthology.orchestra.run.vm06.stdout:Scheduled to stop haproxy.rgw.foo.vm09.xswxmk on host 'vm09'
2026-04-15T13:52:27.334 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for haproxy.rgw.foo.vm09.xswxmk to stop
2026-04-15T13:52:27.511 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:52:27.511 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.rgw.foo.vm06.ndmjsv vm06 *:9000,9001 running (11s) 5s ago 16m 3972k - 2.3.17-d1c9119 5479ac79e01f cc56a79384f5
2026-04-15T13:52:27.511 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.rgw.foo.vm09.xswxmk vm09 *:9000,9001 running (16m) 21s ago 16m 4188k - 2.3.17-d1c9119 5479ac79e01f 4bef945ada04
2026-04-15T13:52:27.750 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK
2026-04-15T13:52:27.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:27 vm06 bash[28114]: cluster 2026-04-15T13:52:26.434988+0000 mgr.vm06.qbbldl (mgr.14229) 1064 : cluster [DBG] pgmap v596: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:52:27.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:27 vm06 bash[28114]: audit 2026-04-15T13:52:27.105215+0000 mon.vm06 (mon.0) 1389 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:27.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:27 vm06 bash[28114]: audit 2026-04-15T13:52:27.111271+0000 mon.vm06 (mon.0) 1390 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:27.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:27 vm06 bash[28114]: audit 2026-04-15T13:52:27.112352+0000 mon.vm06 (mon.0) 1391 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:52:27.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:27 vm06 bash[28114]: audit 2026-04-15T13:52:27.113566+0000 mon.vm06 (mon.0) 1392 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:52:27.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:27 vm06 bash[28114]: audit 2026-04-15T13:52:27.114021+0000 mon.vm06 (mon.0) 1393 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:52:27.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:27 vm06 bash[28114]: audit 2026-04-15T13:52:27.118540+0000 mon.vm06 (mon.0) 1394 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:27.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:27 vm06 bash[28114]: audit 2026-04-15T13:52:27.120173+0000 mon.vm06 (mon.0) 1395 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:52:27.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:27 vm09 bash[34466]: cluster 2026-04-15T13:52:26.434988+0000 mgr.vm06.qbbldl (mgr.14229) 1064 : cluster [DBG] pgmap v596: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:52:27.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:27 vm09 bash[34466]: audit 2026-04-15T13:52:27.105215+0000 mon.vm06 (mon.0) 1389 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:27.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:27 vm09 bash[34466]: audit 2026-04-15T13:52:27.111271+0000 mon.vm06 (mon.0) 1390 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:27.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:27 vm09 bash[34466]: audit 2026-04-15T13:52:27.112352+0000 mon.vm06 (mon.0) 1391 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:52:27.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:27 vm09 bash[34466]: audit 2026-04-15T13:52:27.113566+0000 mon.vm06 (mon.0) 1392 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:52:27.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:27 vm09 bash[34466]: audit 2026-04-15T13:52:27.114021+0000 mon.vm06 (mon.0) 1393 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:52:27.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:27 vm09 bash[34466]: audit 2026-04-15T13:52:27.118540+0000 mon.vm06 (mon.0) 1394 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:27.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:27 vm09 bash[34466]: audit 2026-04-15T13:52:27.120173+0000 mon.vm06 (mon.0) 1395 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:52:28.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:28 vm06 bash[28114]: audit 2026-04-15T13:52:26.888793+0000 mgr.vm06.qbbldl (mgr.14229) 1065 : audit [DBG] from='client.16976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:28.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:28 vm06 bash[28114]: audit 2026-04-15T13:52:27.098051+0000 mgr.vm06.qbbldl (mgr.14229) 1066 : audit [DBG] from='client.16980 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.rgw.foo.vm09.xswxmk", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:28.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:28 vm06 bash[28114]: cephadm 2026-04-15T13:52:27.098524+0000 mgr.vm06.qbbldl (mgr.14229) 1067 : cephadm [INF] Schedule stop daemon haproxy.rgw.foo.vm09.xswxmk
2026-04-15T13:52:28.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:28 vm06 bash[28114]: audit 2026-04-15T13:52:27.314791+0000 mgr.vm06.qbbldl (mgr.14229) 1068 : audit [DBG] from='client.16984 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:28.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:28 vm06 bash[28114]: audit 2026-04-15T13:52:27.505431+0000 mgr.vm06.qbbldl (mgr.14229) 1069 : audit [DBG] from='client.16988 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:28.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:28 vm06 bash[28114]: audit 2026-04-15T13:52:27.570571+0000 mon.vm06 (mon.0) 1396 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:28.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:28 vm06 bash[28114]: audit 2026-04-15T13:52:27.576435+0000 mon.vm06 (mon.0) 1397 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:28.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:28 vm06 bash[28114]: audit 2026-04-15T13:52:27.577189+0000 mon.vm06 (mon.0) 1398 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:52:28.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:28 vm06 bash[28114]: audit 2026-04-15T13:52:27.749181+0000 mon.vm09 (mon.1) 37 : audit [DBG] from='client.? 192.168.123.106:0/1359078240' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:52:28.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:28 vm06 bash[28114]: cluster 2026-04-15T13:52:28.435397+0000 mgr.vm06.qbbldl (mgr.14229) 1070 : cluster [DBG] pgmap v597: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:52:28.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:28 vm09 bash[34466]: audit 2026-04-15T13:52:26.888793+0000 mgr.vm06.qbbldl (mgr.14229) 1065 : audit [DBG] from='client.16976 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:28.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:28 vm09 bash[34466]: audit 2026-04-15T13:52:27.098051+0000 mgr.vm06.qbbldl (mgr.14229) 1066 : audit [DBG] from='client.16980 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "stop", "name": "haproxy.rgw.foo.vm09.xswxmk", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:28.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:28 vm09 bash[34466]: cephadm 2026-04-15T13:52:27.098524+0000 mgr.vm06.qbbldl (mgr.14229) 1067 : cephadm [INF] Schedule stop daemon haproxy.rgw.foo.vm09.xswxmk
2026-04-15T13:52:28.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:28 vm09 bash[34466]: audit 2026-04-15T13:52:27.314791+0000 mgr.vm06.qbbldl (mgr.14229) 1068 : audit [DBG] from='client.16984 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:28.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:28 vm09 bash[34466]: audit 2026-04-15T13:52:27.505431+0000 mgr.vm06.qbbldl (mgr.14229) 1069 : audit [DBG] from='client.16988 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:28.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:28 vm09 bash[34466]: audit 2026-04-15T13:52:27.570571+0000 mon.vm06 (mon.0) 1396 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:28.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:28 vm09 bash[34466]: audit 2026-04-15T13:52:27.576435+0000 mon.vm06 (mon.0) 1397 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:28.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:28 vm09 bash[34466]: audit 2026-04-15T13:52:27.577189+0000 mon.vm06 (mon.0) 1398 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch
2026-04-15T13:52:28.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:28 vm09 bash[34466]: audit 2026-04-15T13:52:27.749181+0000 mon.vm09 (mon.1) 37 : audit [DBG] from='client.? 192.168.123.106:0/1359078240' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch
2026-04-15T13:52:28.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:28 vm09 bash[34466]: cluster 2026-04-15T13:52:28.435397+0000 mgr.vm06.qbbldl (mgr.14229) 1070 : cluster [DBG] pgmap v597: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:52:31.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:31 vm06 bash[28114]: cluster 2026-04-15T13:52:30.435832+0000 mgr.vm06.qbbldl (mgr.14229) 1071 : cluster [DBG] pgmap v598: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:52:31.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:31 vm09 bash[34466]: cluster 2026-04-15T13:52:30.435832+0000 mgr.vm06.qbbldl (mgr.14229) 1071 : cluster [DBG] pgmap v598: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:52:32.956 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.rgw.foo.vm09.xswxmk vm09 *:9000,9001 stopped 0s ago 16m - -
2026-04-15T13:52:32.960 INFO:teuthology.orchestra.run.vm06.stderr: % Total % Received % Xferd Average Speed Time Time Time Current
2026-04-15T13:52:32.961 INFO:teuthology.orchestra.run.vm06.stderr: Dload Upload Total Spent Left Speed
2026-04-15T13:52:32.961 INFO:teuthology.orchestra.run.vm06.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 182k 0 --:--:-- --:--:-- --:--:-- 182k
2026-04-15T13:52:33.158 INFO:teuthology.orchestra.run.vm06.stdout:anonymousScheduled to start haproxy.rgw.foo.vm09.xswxmk on host 'vm09'
2026-04-15T13:52:33.389 INFO:teuthology.orchestra.run.vm06.stdout:Waiting for haproxy.rgw.foo.vm09.xswxmk to start
2026-04-15T13:52:33.586 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:52:33.586 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.rgw.foo.vm06.ndmjsv vm06 *:9000,9001 running (17s) 11s ago 16m 3972k - 2.3.17-d1c9119 5479ac79e01f cc56a79384f5
2026-04-15T13:52:33.586 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.rgw.foo.vm09.xswxmk vm09 *:9000,9001 stopped 1s ago 17m - -
2026-04-15T13:52:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:32.337347+0000 mon.vm06 (mon.0) 1399 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 
15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:32.337347+0000 mon.vm06 (mon.0) 1399 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:32.341626+0000 mon.vm06 (mon.0) 1400 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:32.341626+0000 mon.vm06 (mon.0) 1400 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:32.342185+0000 mon.vm06 (mon.0) 1401 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:52:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:32.342185+0000 mon.vm06 (mon.0) 1401 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:52:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:32.342568+0000 mon.vm06 (mon.0) 1402 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:52:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:32.342568+0000 mon.vm06 (mon.0) 1402 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:52:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:32.345538+0000 mon.vm06 (mon.0) 1403 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:32.345538+0000 mon.vm06 (mon.0) 1403 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:32.346752+0000 mon.vm06 (mon.0) 1404 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:52:33.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:32.346752+0000 mon.vm06 (mon.0) 1404 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:52:33.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: cluster 2026-04-15T13:52:32.436228+0000 mgr.vm06.qbbldl (mgr.14229) 1072 : cluster [DBG] pgmap v599: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:52:33.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: cluster 2026-04-15T13:52:32.436228+0000 mgr.vm06.qbbldl (mgr.14229) 1072 : cluster [DBG] pgmap v599: 129 pgs: 
129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:52:33.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:33.149900+0000 mon.vm06 (mon.0) 1405 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:33.149900+0000 mon.vm06 (mon.0) 1405 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:33.154199+0000 mon.vm06 (mon.0) 1406 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:33.154199+0000 mon.vm06 (mon.0) 1406 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:33.154801+0000 mon.vm06 (mon.0) 1407 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:52:33.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:33.154801+0000 mon.vm06 (mon.0) 1407 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:52:33.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:33.155878+0000 mon.vm06 (mon.0) 1408 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:52:33.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:33.155878+0000 mon.vm06 (mon.0) 1408 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:52:33.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:33.156283+0000 mon.vm06 (mon.0) 1409 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:52:33.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:33.156283+0000 mon.vm06 (mon.0) 1409 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:52:33.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:33.159658+0000 mon.vm06 (mon.0) 1410 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:33.159658+0000 mon.vm06 (mon.0) 1410 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:33.160962+0000 mon.vm06 
(mon.0) 1411 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:52:33.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:33 vm09 bash[34466]: audit 2026-04-15T13:52:33.160962+0000 mon.vm06 (mon.0) 1411 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:52:33.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:32.337347+0000 mon.vm06 (mon.0) 1399 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:32.337347+0000 mon.vm06 (mon.0) 1399 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:32.341626+0000 mon.vm06 (mon.0) 1400 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:32.341626+0000 mon.vm06 (mon.0) 1400 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:32.342185+0000 mon.vm06 (mon.0) 1401 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:32.342185+0000 mon.vm06 (mon.0) 1401 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:32.342568+0000 mon.vm06 (mon.0) 1402 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:32.342568+0000 mon.vm06 (mon.0) 1402 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:32.345538+0000 mon.vm06 (mon.0) 1403 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:32.345538+0000 mon.vm06 (mon.0) 1403 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:32.346752+0000 mon.vm06 (mon.0) 1404 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:52:33.767 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:32.346752+0000 mon.vm06 (mon.0) 1404 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: cluster 2026-04-15T13:52:32.436228+0000 mgr.vm06.qbbldl (mgr.14229) 1072 : cluster [DBG] pgmap v599: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: cluster 2026-04-15T13:52:32.436228+0000 mgr.vm06.qbbldl (mgr.14229) 1072 : cluster [DBG] pgmap v599: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:33.149900+0000 mon.vm06 (mon.0) 1405 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:33.149900+0000 mon.vm06 (mon.0) 1405 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:33.154199+0000 mon.vm06 (mon.0) 1406 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:33.154199+0000 mon.vm06 (mon.0) 1406 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:33.154801+0000 mon.vm06 (mon.0) 1407 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:33.154801+0000 mon.vm06 (mon.0) 1407 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:33.155878+0000 mon.vm06 (mon.0) 1408 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:33.155878+0000 mon.vm06 (mon.0) 1408 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:33.156283+0000 mon.vm06 (mon.0) 1409 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 
2026-04-15T13:52:33.156283+0000 mon.vm06 (mon.0) 1409 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:33.159658+0000 mon.vm06 (mon.0) 1410 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:33.159658+0000 mon.vm06 (mon.0) 1410 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:33.160962+0000 mon.vm06 (mon.0) 1411 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:52:33.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:33 vm06 bash[28114]: audit 2026-04-15T13:52:33.160962+0000 mon.vm06 (mon.0) 1411 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch 2026-04-15T13:52:33.844 INFO:teuthology.orchestra.run.vm06.stdout:HEALTH_OK 2026-04-15T13:52:34.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:34 vm09 bash[34466]: audit 2026-04-15T13:52:32.936478+0000 mgr.vm06.qbbldl (mgr.14229) 1073 : audit [DBG] from='client.16996 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:34.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:34 vm09 bash[34466]: audit 2026-04-15T13:52:32.936478+0000 mgr.vm06.qbbldl (mgr.14229) 1073 : audit [DBG] from='client.16996 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:34.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:34 vm09 bash[34466]: audit 2026-04-15T13:52:33.144051+0000 mgr.vm06.qbbldl (mgr.14229) 1074 : audit [DBG] from='client.17000 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm09.xswxmk", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:34.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:34 vm09 bash[34466]: audit 2026-04-15T13:52:33.144051+0000 mgr.vm06.qbbldl (mgr.14229) 1074 : audit [DBG] from='client.17000 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm09.xswxmk", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:34.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:34 vm09 bash[34466]: cephadm 2026-04-15T13:52:33.144411+0000 mgr.vm06.qbbldl (mgr.14229) 1075 : cephadm [INF] Schedule start daemon haproxy.rgw.foo.vm09.xswxmk 2026-04-15T13:52:34.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:34 vm09 bash[34466]: cephadm 2026-04-15T13:52:33.144411+0000 mgr.vm06.qbbldl (mgr.14229) 1075 : cephadm [INF] Schedule start daemon haproxy.rgw.foo.vm09.xswxmk 2026-04-15T13:52:34.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:34 vm09 bash[34466]: audit 2026-04-15T13:52:33.366920+0000 mgr.vm06.qbbldl (mgr.14229) 1076 : audit [DBG] from='client.17004 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:34.610 
INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:34 vm09 bash[34466]: audit 2026-04-15T13:52:33.366920+0000 mgr.vm06.qbbldl (mgr.14229) 1076 : audit [DBG] from='client.17004 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:34.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:34 vm09 bash[34466]: audit 2026-04-15T13:52:33.778022+0000 mon.vm06 (mon.0) 1412 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:34.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:34 vm09 bash[34466]: audit 2026-04-15T13:52:33.778022+0000 mon.vm06 (mon.0) 1412 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:34.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:34 vm09 bash[34466]: audit 2026-04-15T13:52:33.782721+0000 mon.vm06 (mon.0) 1413 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:34.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:34 vm09 bash[34466]: audit 2026-04-15T13:52:33.782721+0000 mon.vm06 (mon.0) 1413 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:34.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:34 vm09 bash[34466]: audit 2026-04-15T13:52:33.783629+0000 mon.vm06 (mon.0) 1414 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:52:34.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:34 vm09 bash[34466]: audit 2026-04-15T13:52:33.783629+0000 mon.vm06 (mon.0) 1414 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:52:34.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:34 vm09 bash[34466]: audit 2026-04-15T13:52:33.840850+0000 mon.vm06 (mon.0) 1415 : audit [DBG] from='client.? 192.168.123.106:0/1406175236' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:52:34.610 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:34 vm09 bash[34466]: audit 2026-04-15T13:52:33.840850+0000 mon.vm06 (mon.0) 1415 : audit [DBG] from='client.? 
192.168.123.106:0/1406175236' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:52:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:34 vm06 bash[28114]: audit 2026-04-15T13:52:32.936478+0000 mgr.vm06.qbbldl (mgr.14229) 1073 : audit [DBG] from='client.16996 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:34 vm06 bash[28114]: audit 2026-04-15T13:52:32.936478+0000 mgr.vm06.qbbldl (mgr.14229) 1073 : audit [DBG] from='client.16996 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:34 vm06 bash[28114]: audit 2026-04-15T13:52:33.144051+0000 mgr.vm06.qbbldl (mgr.14229) 1074 : audit [DBG] from='client.17000 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm09.xswxmk", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:34 vm06 bash[28114]: audit 2026-04-15T13:52:33.144051+0000 mgr.vm06.qbbldl (mgr.14229) 1074 : audit [DBG] from='client.17000 -' entity='client.admin' cmd=[{"prefix": "orch daemon", "action": "start", "name": "haproxy.rgw.foo.vm09.xswxmk", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:34 vm06 bash[28114]: cephadm 2026-04-15T13:52:33.144411+0000 mgr.vm06.qbbldl (mgr.14229) 1075 : cephadm [INF] Schedule start daemon haproxy.rgw.foo.vm09.xswxmk 2026-04-15T13:52:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:34 vm06 bash[28114]: cephadm 2026-04-15T13:52:33.144411+0000 mgr.vm06.qbbldl (mgr.14229) 1075 : cephadm [INF] Schedule start daemon haproxy.rgw.foo.vm09.xswxmk 2026-04-15T13:52:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:34 vm06 bash[28114]: audit 2026-04-15T13:52:33.366920+0000 mgr.vm06.qbbldl (mgr.14229) 1076 : audit [DBG] from='client.17004 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:34 vm06 bash[28114]: audit 2026-04-15T13:52:33.366920+0000 mgr.vm06.qbbldl (mgr.14229) 1076 : audit [DBG] from='client.17004 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:34 vm06 bash[28114]: audit 2026-04-15T13:52:33.778022+0000 mon.vm06 (mon.0) 1412 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:34 vm06 bash[28114]: audit 2026-04-15T13:52:33.778022+0000 mon.vm06 (mon.0) 1412 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:34 vm06 bash[28114]: audit 2026-04-15T13:52:33.782721+0000 mon.vm06 (mon.0) 1413 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:34 vm06 bash[28114]: audit 2026-04-15T13:52:33.782721+0000 mon.vm06 (mon.0) 1413 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' 2026-04-15T13:52:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:34 vm06 bash[28114]: 
audit 2026-04-15T13:52:33.783629+0000 mon.vm06 (mon.0) 1414 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:52:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:34 vm06 bash[28114]: audit 2026-04-15T13:52:33.783629+0000 mon.vm06 (mon.0) 1414 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config dump", "format": "json"} : dispatch 2026-04-15T13:52:34.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:34 vm06 bash[28114]: audit 2026-04-15T13:52:33.840850+0000 mon.vm06 (mon.0) 1415 : audit [DBG] from='client.? 192.168.123.106:0/1406175236' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:52:34.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:34 vm06 bash[28114]: audit 2026-04-15T13:52:33.840850+0000 mon.vm06 (mon.0) 1415 : audit [DBG] from='client.? 192.168.123.106:0/1406175236' entity='client.admin' cmd={"prefix": "health", "detail": "detail"} : dispatch 2026-04-15T13:52:35.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:35 vm06 bash[28114]: audit 2026-04-15T13:52:33.578989+0000 mgr.vm06.qbbldl (mgr.14229) 1077 : audit [DBG] from='client.17008 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:35.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:35 vm06 bash[28114]: audit 2026-04-15T13:52:33.578989+0000 mgr.vm06.qbbldl (mgr.14229) 1077 : audit [DBG] from='client.17008 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:35.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:35 vm06 bash[28114]: cluster 2026-04-15T13:52:34.436651+0000 mgr.vm06.qbbldl (mgr.14229) 1078 : cluster [DBG] pgmap v600: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T13:52:35.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:35 vm06 bash[28114]: cluster 2026-04-15T13:52:34.436651+0000 mgr.vm06.qbbldl (mgr.14229) 1078 : cluster [DBG] pgmap v600: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T13:52:35.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:35 vm09 bash[34466]: audit 2026-04-15T13:52:33.578989+0000 mgr.vm06.qbbldl (mgr.14229) 1077 : audit [DBG] from='client.17008 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:35.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:35 vm09 bash[34466]: audit 2026-04-15T13:52:33.578989+0000 mgr.vm06.qbbldl (mgr.14229) 1077 : audit [DBG] from='client.17008 -' entity='client.admin' cmd=[{"prefix": "orch ps", "daemon_type": "haproxy", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:35.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:35 vm09 bash[34466]: cluster 2026-04-15T13:52:34.436651+0000 mgr.vm06.qbbldl (mgr.14229) 1078 : cluster [DBG] pgmap v600: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T13:52:35.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:35 vm09 bash[34466]: cluster 2026-04-15T13:52:34.436651+0000 mgr.vm06.qbbldl (mgr.14229) 1078 : cluster [DBG] pgmap v600: 129 pgs: 129 active+clean; 587 KiB 
data, 216 MiB used, 160 GiB / 160 GiB avail; 255 B/s rd, 511 B/s wr, 0 op/s 2026-04-15T13:52:37.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:37 vm06 bash[28114]: cluster 2026-04-15T13:52:36.437014+0000 mgr.vm06.qbbldl (mgr.14229) 1079 : cluster [DBG] pgmap v601: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:52:37.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:37 vm06 bash[28114]: cluster 2026-04-15T13:52:36.437014+0000 mgr.vm06.qbbldl (mgr.14229) 1079 : cluster [DBG] pgmap v601: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:52:37.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:37 vm09 bash[34466]: cluster 2026-04-15T13:52:36.437014+0000 mgr.vm06.qbbldl (mgr.14229) 1079 : cluster [DBG] pgmap v601: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:52:37.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:37 vm09 bash[34466]: cluster 2026-04-15T13:52:36.437014+0000 mgr.vm06.qbbldl (mgr.14229) 1079 : cluster [DBG] pgmap v601: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s 2026-04-15T13:52:39.052 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.rgw.foo.vm09.xswxmk vm09 *:9000,9001 running (5s) 0s ago 17m 3907k - 2.3.17-d1c9119 5479ac79e01f 184a7d5a9750 2026-04-15T13:52:39.059 INFO:teuthology.orchestra.run.vm06.stderr: % Total % Received % Xferd Average Speed Time Time Time Current 2026-04-15T13:52:39.061 INFO:teuthology.orchestra.run.vm06.stderr: Dload Upload Total Spent Left Speed 2026-04-15T13:52:39.063 INFO:teuthology.orchestra.run.vm06.stderr: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 187 0 187 0 0 37400 0 --:--:-- --:--:-- --:--:-- 37400 2026-04-15T13:52:39.143 INFO:teuthology.orchestra.run.vm06.stdout:anonymous 2026-04-15T13:52:39.143 INFO:teuthology.run_tasks:Running task cephadm.shell... 
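[editor's note] The curl progress tables captured on stderr above come from the harness probing the ingress endpoint with a bare curl; the 187-byte response body arrives on stdout without a trailing newline, which is why "anonymous" runs straight into the orchestrator's next output at 13:52:33.158. A quieter probe that also treats HTTP errors as failures is possible; a minimal bash sketch, assuming a hypothetical placeholder for the virtual IP (this is an illustration, not the harness's code):

    #!/usr/bin/env bash
    # Editor's sketch, not part of the test: probe the ingress endpoint without
    # curl's progress meter cluttering stderr.
    VIP=192.0.2.10   # hypothetical virtual IP; substitute the ingress VIP under test
    # -f  : exit non-zero on HTTP >= 400
    # -sS : silence the progress meter but keep real errors on stderr
    # -o /dev/null discards the body; -w prints only the HTTP status code
    curl -fsS -o /dev/null -w '%{http_code}\n' "http://${VIP}:9000/"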
2026-04-15T13:52:39.146 INFO:tasks.cephadm:Running commands on role host.a host ubuntu@vm06.local
2026-04-15T13:52:39.146 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- bash -c 'stat -c '"'"'%u %g'"'"' /var/log/ceph | grep '"'"'167 167'"'"''
2026-04-15T13:52:39.421 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:52:39.507 INFO:teuthology.orchestra.run.vm06.stdout:167 167
2026-04-15T13:52:39.557 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- bash -c 'ceph orch status'
2026-04-15T13:52:39.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:39 vm06 bash[28114]: cluster 2026-04-15T13:52:38.437412+0000 mgr.vm06.qbbldl (mgr.14229) 1080 : cluster [DBG] pgmap v602: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:52:39.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:39 vm06 bash[28114]: audit 2026-04-15T13:52:38.497883+0000 mon.vm06 (mon.0) 1416 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:52:39.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:39 vm06 bash[28114]: audit 2026-04-15T13:52:38.626895+0000 mon.vm06 (mon.0) 1417 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:39.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:39 vm06 bash[28114]: audit 2026-04-15T13:52:38.631397+0000 mon.vm06 (mon.0) 1418 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:39.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:39 vm06 bash[28114]: audit 2026-04-15T13:52:38.632198+0000 mon.vm06 (mon.0) 1419 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
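[editor's note] The stat check above asserts that /var/log/ceph on the host is owned by uid/gid 167, and the "167 167" line on stdout confirms it; 167 is the uid/gid the containerized Ceph daemons run as. Because grep matches substrings, '167 167' would also accept values such as "1167 1672"; an exact comparison is stricter. A minimal bash sketch of the same assertion (editor's illustration, not the harness's code):

    #!/usr/bin/env bash
    # Editor's sketch: assert /var/log/ceph is owned by the ceph container uid/gid.
    # The expected value "167 167" is taken from the check shown in the log above.
    expected='167 167'
    actual=$(stat -c '%u %g' /var/log/ceph)
    if [ "$actual" != "$expected" ]; then
        echo "unexpected /var/log/ceph ownership: $actual (want $expected)" >&2
        exit 1
    fi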
2026-04-15T13:52:39.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:39 vm06 bash[28114]: audit 2026-04-15T13:52:38.632687+0000 mon.vm06 (mon.0) 1420 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:52:39.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:39 vm06 bash[28114]: audit 2026-04-15T13:52:38.636351+0000 mon.vm06 (mon.0) 1421 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:39.767 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:39 vm06 bash[28114]: audit 2026-04-15T13:52:38.637594+0000 mon.vm06 (mon.0) 1422 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:52:39.832 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:52:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:39 vm09 bash[34466]: cluster 2026-04-15T13:52:38.437412+0000 mgr.vm06.qbbldl (mgr.14229) 1080 : cluster [DBG] pgmap v602: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 85 B/s rd, 170 B/s wr, 0 op/s
2026-04-15T13:52:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:39 vm09 bash[34466]: audit 2026-04-15T13:52:38.497883+0000 mon.vm06 (mon.0) 1416 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd blocklist ls", "format": "json"} : dispatch
2026-04-15T13:52:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:39 vm09 bash[34466]: audit 2026-04-15T13:52:38.626895+0000 mon.vm06 (mon.0) 1417 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:39 vm09 bash[34466]: audit 2026-04-15T13:52:38.631397+0000 mon.vm06 (mon.0) 1418 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:39 vm09 bash[34466]: audit 2026-04-15T13:52:38.632198+0000 mon.vm06 (mon.0) 1419 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config generate-minimal-conf"} : dispatch
2026-04-15T13:52:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:39 vm09 bash[34466]: audit 2026-04-15T13:52:38.632687+0000 mon.vm06 (mon.0) 1420 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "auth get", "entity": "client.admin"} : dispatch
2026-04-15T13:52:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:39 vm09 bash[34466]: audit 2026-04-15T13:52:38.636351+0000 mon.vm06 (mon.0) 1421 : audit [INF] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
2026-04-15T13:52:39.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:39 vm09 bash[34466]: audit 2026-04-15T13:52:38.637594+0000 mon.vm06 (mon.0) 1422 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "osd tree", "states": ["destroyed"], "format": "json"} : dispatch
2026-04-15T13:52:40.224 INFO:teuthology.orchestra.run.vm06.stdout:Backend: cephadm
2026-04-15T13:52:40.224 INFO:teuthology.orchestra.run.vm06.stdout:Available: Yes
2026-04-15T13:52:40.224 INFO:teuthology.orchestra.run.vm06.stdout:Paused: No
2026-04-15T13:52:40.294 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- bash -c 'ceph orch ps'
2026-04-15T13:52:40.561 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:52:40.577 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:40 vm06 bash[28114]: audit 2026-04-15T13:52:39.032710+0000 mgr.vm06.qbbldl (mgr.14229) 1081 : audit [DBG] from='client.17016 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:40.577 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:40 vm06 bash[28114]: audit 2026-04-15T13:52:40.220515+0000 mgr.vm06.qbbldl (mgr.14229) 1082 : audit [DBG] from='client.17020 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:40.577 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:40 vm06 bash[28114]: cluster 2026-04-15T13:52:40.437863+0000 mgr.vm06.qbbldl (mgr.14229) 1083 : cluster [DBG] pgmap v603: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:52:40.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:40 vm09 bash[34466]: audit 2026-04-15T13:52:39.032710+0000 mgr.vm06.qbbldl (mgr.14229) 1081 : audit [DBG] from='client.17016 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:40.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:40 vm09 bash[34466]: audit 2026-04-15T13:52:40.220515+0000 mgr.vm06.qbbldl (mgr.14229) 1082 : audit [DBG] from='client.17020 -' entity='client.admin' cmd=[{"prefix": "orch status", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:40.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:40 vm09 bash[34466]: cluster 2026-04-15T13:52:40.437863+0000 mgr.vm06.qbbldl (mgr.14229) 1083 : cluster [DBG] pgmap v603: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:52:40.926 INFO:teuthology.orchestra.run.vm06.stdout:NAME HOST PORTS STATUS REFRESHED AGE MEM USE MEM LIM VERSION IMAGE ID CONTAINER ID
2026-04-15T13:52:40.926 INFO:teuthology.orchestra.run.vm06.stdout:alertmanager.vm06 vm06 *:9093,9094 running (18m) 19s ago 18m 14.3M - 0.28.1 27c475db5fb1 26e3c2dc7d16
2026-04-15T13:52:40.926 INFO:teuthology.orchestra.run.vm06.stdout:ceph-exporter.vm06 vm06 *:9926 running (19m) 19s ago 19m 10.2M - 20.2.0-19-g7ec4401a095 b4cb326006c0 b74aec347cb5
2026-04-15T13:52:40.926 INFO:teuthology.orchestra.run.vm06.stdout:ceph-exporter.vm09 vm09 *:9926 running (18m) 2s ago 18m 10.1M - 20.2.0-19-g7ec4401a095 b4cb326006c0 f4fa3cdfd9ed
2026-04-15T13:52:40.926 INFO:teuthology.orchestra.run.vm06.stdout:crash.vm06 vm06 running (19m) 19s ago 19m 10.7M - 20.2.0-19-g7ec4401a095 b4cb326006c0 2a2696a315a2
2026-04-15T13:52:40.926 INFO:teuthology.orchestra.run.vm06.stdout:crash.vm09 vm09 running (18m) 2s ago 18m 11.6M - 20.2.0-19-g7ec4401a095 b4cb326006c0 c073efaeb27a
2026-04-15T13:52:40.926 INFO:teuthology.orchestra.run.vm06.stdout:grafana.vm06 vm06 *:3000 running (18m) 19s ago 18m 120M - 12.2.0 74144189b384 38104cd71d42
2026-04-15T13:52:40.926 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.rgw.foo.vm06.ndmjsv vm06 *:9000,9001 running (24s) 19s ago 17m 3972k - 2.3.17-d1c9119 5479ac79e01f cc56a79384f5
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:haproxy.rgw.foo.vm09.xswxmk vm09 *:9000,9001 running (7s) 2s ago 17m 3907k - 2.3.17-d1c9119 5479ac79e01f 184a7d5a9750
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:keepalived.rgw.foo.vm06.mhhxjk vm06 running (16m) 19s ago 16m 2471k - 2.2.4 93f9db46da49 733a3208daaa
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:keepalived.rgw.foo.vm09.cfsofe vm09 running (16m) 2s ago 16m 2496k - 2.2.4 93f9db46da49 0fe3ad963d34
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:mgr.vm06.qbbldl vm06 *:9283,8765,8443 running (19m) 19s ago 19m 558M - 20.2.0-19-g7ec4401a095 b4cb326006c0 afc83296061d
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:mgr.vm09.kpawde vm09 *:8443,9283,8765 running (18m) 2s ago 18m 476M - 20.2.0-19-g7ec4401a095 b4cb326006c0 9c10fb3b60f6
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:mon.vm06 vm06 running (19m) 19s ago 20m 69.1M 2048M 20.2.0-19-g7ec4401a095 b4cb326006c0 713ee534aa80
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:mon.vm09 vm09 running (18m) 2s ago 18m 49.5M 2048M 20.2.0-19-g7ec4401a095 b4cb326006c0 e5c118d71075
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:node-exporter.vm06 vm06 *:9100 running (18m) 19s ago 19m 9.97M - 1.9.1 d00a542e409e b00e05757d31
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:node-exporter.vm09 vm09 *:9100 running (18m) 2s ago 18m 10.4M - 1.9.1 d00a542e409e 632df45ce8d1
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:osd.0 vm09 running (17m) 2s ago 17m 66.6M 4096M 20.2.0-19-g7ec4401a095 b4cb326006c0 b5fa6b7c6859
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:osd.1 vm06 running (17m) 19s ago 17m 68.2M 4096M 20.2.0-19-g7ec4401a095 b4cb326006c0 9b7af0855b1c
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:osd.2 vm09 running (17m) 2s ago 17m 64.8M 4096M 20.2.0-19-g7ec4401a095 b4cb326006c0 d71897b20df0
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:osd.3 vm06 running (17m) 19s ago 17m 74.2M 4096M 20.2.0-19-g7ec4401a095 b4cb326006c0 97f5165630d1
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:osd.4 vm09 running (17m) 2s ago 17m 62.0M 4096M 20.2.0-19-g7ec4401a095 b4cb326006c0 5d8caca6897f
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:osd.5 vm06 running (17m) 19s ago 17m 71.2M 4096M 20.2.0-19-g7ec4401a095 b4cb326006c0 16132db1706e
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:osd.6 vm09 running (17m) 2s ago 17m 60.1M 4096M 20.2.0-19-g7ec4401a095 b4cb326006c0 2f5d7954c0d4
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:osd.7 vm06 running (17m) 19s ago 17m 72.5M 4096M 20.2.0-19-g7ec4401a095 b4cb326006c0 78508029aef6
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:prometheus.vm06 vm06 *:9095 running (16m) 19s ago 18m 51.1M - 3.6.0 76947e7ef22f ac6b4f307728
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.landug vm06 *:8001 running (16m) 19s ago 17m 136M - 20.2.0-19-g7ec4401a095 b4cb326006c0 83775e5cd19f
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm06.liyzhd vm06 *:8000 running (11m) 19s ago 17m 129M - 20.2.0-19-g7ec4401a095 b4cb326006c0 57a0cf0484de
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.iwshxg vm09 *:8000 running (5m) 2s ago 17m 118M - 20.2.0-19-g7ec4401a095 b4cb326006c0 fa1d79d02111
2026-04-15T13:52:40.927 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo.vm09.pxnsqu vm09 *:8001 running (39s) 2s ago 17m 95.8M - 20.2.0-19-g7ec4401a095 b4cb326006c0 d38ed17e823f
2026-04-15T13:52:40.990 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- bash -c 'ceph orch ls'
2026-04-15T13:52:41.240 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:52:41.616 INFO:teuthology.orchestra.run.vm06.stdout:NAME PORTS RUNNING REFRESHED AGE PLACEMENT
2026-04-15T13:52:41.616 INFO:teuthology.orchestra.run.vm06.stdout:alertmanager ?:9093,9094 1/1 19s ago 19m count:1
2026-04-15T13:52:41.616 INFO:teuthology.orchestra.run.vm06.stdout:ceph-exporter ?:9926 2/2 19s ago 19m *
2026-04-15T13:52:41.617 INFO:teuthology.orchestra.run.vm06.stdout:crash 2/2 19s ago 19m *
2026-04-15T13:52:41.617 INFO:teuthology.orchestra.run.vm06.stdout:grafana ?:3000 1/1 19s ago 19m count:1
2026-04-15T13:52:41.617 INFO:teuthology.orchestra.run.vm06.stdout:ingress.rgw.foo 12.12.1.106:9000,9001 4/4 19s ago 17m count:2
2026-04-15T13:52:41.617 INFO:teuthology.orchestra.run.vm06.stdout:mgr 2/2 19s ago 19m count:2
2026-04-15T13:52:41.617 INFO:teuthology.orchestra.run.vm06.stdout:mon 2/2 19s ago 18m vm06:192.168.123.106=vm06;vm09:192.168.123.109=vm09;count:2
2026-04-15T13:52:41.617 INFO:teuthology.orchestra.run.vm06.stdout:node-exporter ?:9100 2/2 19s ago 19m *
2026-04-15T13:52:41.617 INFO:teuthology.orchestra.run.vm06.stdout:osd.all-available-devices 8 19s ago 18m *
2026-04-15T13:52:41.617 INFO:teuthology.orchestra.run.vm06.stdout:prometheus ?:9095 1/1 19s ago 19m count:1
2026-04-15T13:52:41.617 INFO:teuthology.orchestra.run.vm06.stdout:rgw.foo ?:8000,8001 4/4 19s ago 17m count:4;*
2026-04-15T13:52:41.712 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- bash -c 'ceph orch host ls'
2026-04-15T13:52:41.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:41 vm06 bash[28114]: audit 2026-04-15T13:52:40.917344+0000 mgr.vm06.qbbldl (mgr.14229) 1084 : audit [DBG] from='client.17024 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:41.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:41 vm09 bash[34466]: audit 2026-04-15T13:52:40.917344+0000 mgr.vm06.qbbldl (mgr.14229) 1084 : audit [DBG] from='client.17024 -' entity='client.admin' cmd=[{"prefix": "orch ps", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:41.970 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:52:42.337 INFO:teuthology.orchestra.run.vm06.stdout:HOST ADDR LABELS STATUS
2026-04-15T13:52:42.337 INFO:teuthology.orchestra.run.vm06.stdout:vm06 192.168.123.106
2026-04-15T13:52:42.337 INFO:teuthology.orchestra.run.vm06.stdout:vm09 192.168.123.109
2026-04-15T13:52:42.337 INFO:teuthology.orchestra.run.vm06.stdout:2 hosts in cluster
2026-04-15T13:52:42.401 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- bash -c 'ceph orch device ls'
2026-04-15T13:52:42.659 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config
2026-04-15T13:52:42.678 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:42 vm06 bash[28114]: audit 2026-04-15T13:52:41.607581+0000 mgr.vm06.qbbldl (mgr.14229) 1085 : audit [DBG] from='client.17028 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:42.678 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:42 vm06 bash[28114]: audit 2026-04-15T13:52:41.608460+0000 mon.vm06 (mon.0) 1423 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:52:42.678 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:42 vm06 bash[28114]: audit 2026-04-15T13:52:41.609326+0000 mon.vm06 (mon.0) 1424 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:52:42.678 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:42 vm06 bash[28114]: audit 2026-04-15T13:52:41.609873+0000 mon.vm06 (mon.0) 1425 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:52:42.678 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:42 vm06 bash[28114]: audit 2026-04-15T13:52:41.610363+0000 mon.vm06 (mon.0) 1426 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:52:42.678 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:42 vm06 bash[28114]: audit 2026-04-15T13:52:42.333825+0000 mgr.vm06.qbbldl (mgr.14229) 1086 : audit [DBG] from='client.17032 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:42.678 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:42 vm06 bash[28114]: cluster 2026-04-15T13:52:42.438190+0000 mgr.vm06.qbbldl (mgr.14229) 1087 : cluster [DBG] pgmap v604: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s
2026-04-15T13:52:42.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:42 vm09 bash[34466]: audit 2026-04-15T13:52:41.607581+0000 mgr.vm06.qbbldl (mgr.14229) 1085 : audit [DBG] from='client.17028 -' entity='client.admin' cmd=[{"prefix": "orch ls", "target": ["mon-mgr", ""]}]: dispatch
2026-04-15T13:52:42.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:42 vm09 bash[34466]: audit 2026-04-15T13:52:41.608460+0000 mon.vm06 (mon.0) 1423 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch
2026-04-15T13:52:42.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:42 vm09 bash[34466]: audit 2026-04-15T13:52:41.609326+0000 mon.vm06 (mon.0) 1424 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch
2026-04-15T13:52:42.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:42 vm09 bash[34466]: audit 2026-04-15T13:52:41.609873+0000 mon.vm06 (mon.0) 1425 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch
2026-04-15T13:52:42.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:42 vm09 bash[34466]: audit 2026-04-15T13:52:41.610363+0000 mon.vm06 (mon.0) 1426 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch
2026-04-15T13:52:42.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:42 vm09 bash[34466]: audit 2026-04-15T13:52:41.610363+0000 mon.vm06 (mon.0) 1426 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl'
cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:52:42.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:42 vm09 bash[34466]: audit 2026-04-15T13:52:42.333825+0000 mgr.vm06.qbbldl (mgr.14229) 1086 : audit [DBG] from='client.17032 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:42.859 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:42 vm09 bash[34466]: audit 2026-04-15T13:52:42.333825+0000 mgr.vm06.qbbldl (mgr.14229) 1086 : audit [DBG] from='client.17032 -' entity='client.admin' cmd=[{"prefix": "orch host ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:42.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:42 vm09 bash[34466]: cluster 2026-04-15T13:52:42.438190+0000 mgr.vm06.qbbldl (mgr.14229) 1087 : cluster [DBG] pgmap v604: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:52:42.860 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:42 vm09 bash[34466]: cluster 2026-04-15T13:52:42.438190+0000 mgr.vm06.qbbldl (mgr.14229) 1087 : cluster [DBG] pgmap v604: 129 pgs: 129 active+clean; 587 KiB data, 216 MiB used, 160 GiB / 160 GiB avail; 170 B/s rd, 341 B/s wr, 0 op/s 2026-04-15T13:52:43.012 INFO:teuthology.orchestra.run.vm06.stdout:HOST PATH TYPE DEVICE ID SIZE AVAILABLE REFRESHED REJECT REASONS 2026-04-15T13:52:43.012 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/nvme0n1 ssd Linux_c12cb9df1acb8205e4f3 19.9G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:52:43.012 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/nvme1n1 ssd Linux_f4056127debabf608d65 19.9G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:52:43.012 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/nvme2n1 ssd Linux_c827407f82fbaf83f886 19.9G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:52:43.012 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/nvme3n1 ssd Linux_81c0ecdf70e1ab991990 19.9G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:52:43.012 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 17m ago Has a FileSystem, Insufficient space (<5GB) 2026-04-15T13:52:43.012 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vdb hdd DWNBRSTVMM06001 20.0G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:52:43.012 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vdc hdd DWNBRSTVMM06002 20.0G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:52:43.012 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vdd hdd DWNBRSTVMM06003 20.0G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:52:43.012 INFO:teuthology.orchestra.run.vm06.stdout:vm06 /dev/vde hdd DWNBRSTVMM06004 20.0G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:52:43.012 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/nvme0n1 ssd Linux_b804cfa419b2e6ee38b2 19.9G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:52:43.012 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/nvme1n1 ssd Linux_d32cd802138d477b0c27 19.9G No 17m ago Has a FileSystem, 
Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:52:43.012 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/nvme2n1 ssd Linux_1b2c890b6b49bb6c8abc 19.9G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:52:43.012 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/nvme3n1 ssd Linux_e1183b90f8221b5791e4 19.9G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:52:43.012 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/sr0 hdd QEMU_DVD-ROM_QM00003 366k No 17m ago Has a FileSystem, Insufficient space (<5GB) 2026-04-15T13:52:43.012 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/vdb hdd DWNBRSTVMM09001 20.0G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:52:43.012 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/vdc hdd DWNBRSTVMM09002 20.0G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:52:43.012 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/vdd hdd DWNBRSTVMM09003 20.0G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:52:43.012 INFO:teuthology.orchestra.run.vm06.stdout:vm09 /dev/vde hdd DWNBRSTVMM09004 20.0G No 17m ago Has a FileSystem, Insufficient space (<10 extents) on vgs, LVM detected 2026-04-15T13:52:43.081 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- bash -c 'ceph orch ls | grep '"'"'^osd.all-available-devices '"'"'' 2026-04-15T13:52:43.333 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:52:43.709 INFO:teuthology.orchestra.run.vm06.stdout:osd.all-available-devices 8 22s ago 18m * 2026-04-15T13:52:43.764 DEBUG:teuthology.run_tasks:Unwinding manager vip 2026-04-15T13:52:43.766 INFO:tasks.vip:Removing 12.12.0.106 (and any VIPs) on vm06.local iface ens3... 2026-04-15T13:52:43.766 DEBUG:teuthology.orchestra.run.vm06:> sudo ip addr del 12.12.0.106/22 dev ens3 2026-04-15T13:52:43.777 DEBUG:teuthology.orchestra.run.vm06:> sudo ip addr del 12.12.1.106/22 dev ens3 2026-04-15T13:52:43.833 INFO:tasks.vip:Removing 12.12.0.109 (and any VIPs) on vm09.local iface ens3... 
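The vip teardown below removes both the per-host address and the floating VIP with a plain "ip addr del", so on vm09, which no longer holds the VIP 12.12.1.106, the delete fails with "RTNETLINK answers: Cannot assign requested address" (exit 2); the harness tolerates this. A minimal sketch, assuming bash on the test node, of an idempotent variant (the vip/dev values are taken from this run):

    # Delete an address only if the interface actually holds it,
    # avoiding the RTNETLINK error seen on vm09 below.
    vip="12.12.1.106/22"
    dev="ens3"
    if ip -o addr show dev "$dev" | grep -qF " ${vip} "; then
        sudo ip addr del "$vip" dev "$dev"
    else
        echo "address ${vip} not present on ${dev}; skipping"
    fi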
2026-04-15T13:52:43.833 DEBUG:teuthology.orchestra.run.vm09:> sudo ip addr del 12.12.0.109/22 dev ens3 2026-04-15T13:52:43.841 DEBUG:teuthology.orchestra.run.vm09:> sudo ip addr del 12.12.1.106/22 dev ens3 2026-04-15T13:52:43.891 INFO:teuthology.orchestra.run.vm09.stderr:RTNETLINK answers: Cannot assign requested address 2026-04-15T13:52:43.892 DEBUG:teuthology.orchestra.run:got remote process result: 2 2026-04-15T13:52:43.892 DEBUG:teuthology.run_tasks:Unwinding manager cephadm 2026-04-15T13:52:43.894 INFO:tasks.cephadm:Teardown begin 2026-04-15T13:52:43.894 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-04-15T13:52:43.902 DEBUG:teuthology.orchestra.run.vm09:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-04-15T13:52:43.939 INFO:tasks.cephadm:Disabling cephadm mgr module 2026-04-15T13:52:43.939 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm --image harbor.clyso.com/custom-ceph/ceph/ceph:sse-s3-kmip-preview-not-for-production-5 shell -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b -- ceph mgr module disable cephadm 2026-04-15T13:52:44.224 INFO:teuthology.orchestra.run.vm06.stderr:Inferring config /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/mon.vm06/config 2026-04-15T13:52:44.442 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:44 vm06 bash[28114]: audit 2026-04-15T13:52:43.006733+0000 mgr.vm06.qbbldl (mgr.14229) 1088 : audit [DBG] from='client.17036 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:44.442 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:44 vm06 bash[28114]: audit 2026-04-15T13:52:43.006733+0000 mgr.vm06.qbbldl (mgr.14229) 1088 : audit [DBG] from='client.17036 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:44.442 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:44 vm06 bash[28114]: audit 2026-04-15T13:52:43.675920+0000 mon.vm06 (mon.0) 1427 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch 2026-04-15T13:52:44.442 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:44 vm06 bash[28114]: audit 2026-04-15T13:52:43.675920+0000 mon.vm06 (mon.0) 1427 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch 2026-04-15T13:52:44.442 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:44 vm06 bash[28114]: audit 2026-04-15T13:52:43.676677+0000 mon.vm06 (mon.0) 1428 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:52:44.442 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:44 vm06 bash[28114]: audit 2026-04-15T13:52:43.676677+0000 mon.vm06 (mon.0) 1428 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:52:44.442 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:44 vm06 bash[28114]: audit 2026-04-15T13:52:43.683069+0000 mon.vm06 (mon.0) 1429 : audit [DBG] from='mgr.14229 
192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch 2026-04-15T13:52:44.442 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:44 vm06 bash[28114]: audit 2026-04-15T13:52:43.683069+0000 mon.vm06 (mon.0) 1429 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch 2026-04-15T13:52:44.442 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:44 vm06 bash[28114]: audit 2026-04-15T13:52:43.688375+0000 mon.vm06 (mon.0) 1430 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:52:44.442 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:44 vm06 bash[28114]: audit 2026-04-15T13:52:43.688375+0000 mon.vm06 (mon.0) 1430 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:52:44.458 INFO:teuthology.orchestra.run.vm06.stderr:2026-04-15T13:52:44.448+0000 7f0b456fe640 -1 auth: error reading file: /etc/ceph/ceph.keyring: bufferlist::read_file(/etc/ceph/ceph.keyring): read error:(21) Is a directory 2026-04-15T13:52:44.458 INFO:teuthology.orchestra.run.vm06.stderr:2026-04-15T13:52:44.448+0000 7f0b456fe640 -1 auth: failed to load /etc/ceph/ceph.keyring: (21) Is a directory 2026-04-15T13:52:44.458 INFO:teuthology.orchestra.run.vm06.stderr:2026-04-15T13:52:44.448+0000 7f0b456fe640 -1 auth: error reading file: /etc/ceph/ceph.keyring: bufferlist::read_file(/etc/ceph/ceph.keyring): read error:(21) Is a directory 2026-04-15T13:52:44.458 INFO:teuthology.orchestra.run.vm06.stderr:2026-04-15T13:52:44.448+0000 7f0b456fe640 -1 auth: failed to load /etc/ceph/ceph.keyring: (21) Is a directory 2026-04-15T13:52:44.458 INFO:teuthology.orchestra.run.vm06.stderr:2026-04-15T13:52:44.448+0000 7f0b456fe640 -1 auth: error reading file: /etc/ceph/ceph.keyring: bufferlist::read_file(/etc/ceph/ceph.keyring): read error:(21) Is a directory 2026-04-15T13:52:44.458 INFO:teuthology.orchestra.run.vm06.stderr:2026-04-15T13:52:44.448+0000 7f0b456fe640 -1 auth: failed to load /etc/ceph/ceph.keyring: (21) Is a directory 2026-04-15T13:52:44.458 INFO:teuthology.orchestra.run.vm06.stderr:2026-04-15T13:52:44.448+0000 7f0b456fe640 -1 monclient: keyring not found 2026-04-15T13:52:44.459 INFO:teuthology.orchestra.run.vm06.stderr:[errno 21] error connecting to the cluster 2026-04-15T13:52:44.538 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-04-15T13:52:44.538 INFO:tasks.cephadm:Cleaning up testdir ceph.* files... 2026-04-15T13:52:44.538 DEBUG:teuthology.orchestra.run.vm06:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-04-15T13:52:44.541 DEBUG:teuthology.orchestra.run.vm09:> rm -f /home/ubuntu/cephtest/seed.ceph.conf /home/ubuntu/cephtest/ceph.pub 2026-04-15T13:52:44.544 INFO:tasks.cephadm:Stopping all daemons... 2026-04-15T13:52:44.544 INFO:tasks.cephadm.mon.vm06:Stopping mon.vm06... 
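The errno 21 (EISDIR) failures above mean /etc/ceph/ceph.keyring exists as a directory rather than a file, so the "ceph mgr module disable cephadm" shell cannot load a keyring and exits 1; the same condition trips the later "rm -f /etc/ceph/ceph.client.admin.keyring" ("Is a directory", further down). One plausible cause, an assumption not confirmed by this log, is a container bind mount whose source path was missing and which the runtime therefore created as a directory. A hedged bash sketch for spotting and clearing such leftovers:

    # Report keyring paths that are directories (EISDIR when read)
    # and remove them so the teardown's rm -f can succeed.
    for p in /etc/ceph/ceph.keyring /etc/ceph/ceph.client.admin.keyring; do
        if sudo test -d "$p"; then
            echo "$p is a directory, not a keyring file"
            sudo rm -rf -- "$p"
        fi
    done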
2026-04-15T13:52:44.545 DEBUG:teuthology.orchestra.run.vm06:> sudo systemctl stop ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@mon.vm06 2026-04-15T13:52:44.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:44 vm09 bash[34466]: audit 2026-04-15T13:52:43.006733+0000 mgr.vm06.qbbldl (mgr.14229) 1088 : audit [DBG] from='client.17036 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:44.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:44 vm09 bash[34466]: audit 2026-04-15T13:52:43.006733+0000 mgr.vm06.qbbldl (mgr.14229) 1088 : audit [DBG] from='client.17036 -' entity='client.admin' cmd=[{"prefix": "orch device ls", "target": ["mon-mgr", ""]}]: dispatch 2026-04-15T13:52:44.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:44 vm09 bash[34466]: audit 2026-04-15T13:52:43.675920+0000 mon.vm06 (mon.0) 1427 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch 2026-04-15T13:52:44.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:44 vm09 bash[34466]: audit 2026-04-15T13:52:43.675920+0000 mon.vm06 (mon.0) 1427 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.landug", "name": "rgw_frontends"} : dispatch 2026-04-15T13:52:44.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:44 vm09 bash[34466]: audit 2026-04-15T13:52:43.676677+0000 mon.vm06 (mon.0) 1428 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:52:44.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:44 vm09 bash[34466]: audit 2026-04-15T13:52:43.676677+0000 mon.vm06 (mon.0) 1428 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm06.liyzhd", "name": "rgw_frontends"} : dispatch 2026-04-15T13:52:44.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:44 vm09 bash[34466]: audit 2026-04-15T13:52:43.683069+0000 mon.vm06 (mon.0) 1429 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch 2026-04-15T13:52:44.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:44 vm09 bash[34466]: audit 2026-04-15T13:52:43.683069+0000 mon.vm06 (mon.0) 1429 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.pxnsqu", "name": "rgw_frontends"} : dispatch 2026-04-15T13:52:44.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:44 vm09 bash[34466]: audit 2026-04-15T13:52:43.688375+0000 mon.vm06 (mon.0) 1430 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:52:44.609 INFO:journalctl@ceph.mon.vm09.vm09.stdout:Apr 15 13:52:44 vm09 bash[34466]: audit 2026-04-15T13:52:43.688375+0000 mon.vm06 (mon.0) 1430 : audit [DBG] from='mgr.14229 192.168.123.106:0/2848527476' entity='mgr.vm06.qbbldl' cmd={"prefix": "config get", "who": "client.rgw.foo.vm09.iwshxg", "name": "rgw_frontends"} : dispatch 2026-04-15T13:52:44.766 
INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:44 vm06 systemd[1]: Stopping Ceph mon.vm06 for 75e42418-38cf-11f1-9300-4fe77ac4445b... 2026-04-15T13:52:44.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:44 vm06 bash[28114]: debug 2026-04-15T13:52:44.628+0000 7f6b17704640 -1 received signal: Terminated from /sbin/docker-init -- /usr/bin/ceph-mon -n mon.vm06 -f --setuser ceph --setgroup ceph --default-log-to-file=false --default-log-to-stderr=true --default-log-stderr-prefix=debug --default-mon-cluster-log-to-file=false --default-mon-cluster-log-to-stderr=true (PID: 1) UID: 0 2026-04-15T13:52:44.766 INFO:journalctl@ceph.mon.vm06.vm06.stdout:Apr 15 13:52:44 vm06 bash[28114]: debug 2026-04-15T13:52:44.628+0000 7f6b17704640 -1 mon.vm06@0(leader) e2 *** Got Signal Terminated *** 2026-04-15T13:52:44.928 DEBUG:teuthology.orchestra.run.vm06:> sudo pkill -f 'journalctl -f -n 0 -u ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@mon.vm06.service' 2026-04-15T13:52:44.957 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-04-15T13:52:44.957 INFO:tasks.cephadm.mon.vm06:Stopped mon.vm06 2026-04-15T13:52:44.957 INFO:tasks.cephadm.mon.vm09:Stopping mon.vm09... 2026-04-15T13:52:44.957 DEBUG:teuthology.orchestra.run.vm09:> sudo systemctl stop ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@mon.vm09 2026-04-15T13:52:45.113 DEBUG:teuthology.orchestra.run.vm09:> sudo pkill -f 'journalctl -f -n 0 -u ceph-75e42418-38cf-11f1-9300-4fe77ac4445b@mon.vm09.service' 2026-04-15T13:52:45.138 DEBUG:teuthology.orchestra.run:got remote process result: None 2026-04-15T13:52:45.138 INFO:tasks.cephadm.mon.vm09:Stopped mon.vm09 2026-04-15T13:52:45.138 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b --force --keep-logs 2026-04-15T13:52:45.409 INFO:teuthology.orchestra.run.vm06.stdout:Deleting cluster with fsid: 75e42418-38cf-11f1-9300-4fe77ac4445b 2026-04-15T13:53:35.191 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b --force --keep-logs 2026-04-15T13:53:35.440 INFO:teuthology.orchestra.run.vm09.stdout:Deleting cluster with fsid: 75e42418-38cf-11f1-9300-4fe77ac4445b 2026-04-15T13:54:23.965 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-04-15T13:54:23.973 INFO:teuthology.orchestra.run.vm06.stderr:rm: cannot remove '/etc/ceph/ceph.client.admin.keyring': Is a directory 2026-04-15T13:54:23.974 DEBUG:teuthology.orchestra.run:got remote process result: 1 2026-04-15T13:54:23.974 DEBUG:teuthology.orchestra.run.vm09:> sudo rm -f /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring 2026-04-15T13:54:23.982 INFO:tasks.cephadm:Archiving crash dumps... 2026-04-15T13:54:23.982 DEBUG:teuthology.misc:Transferring archived files from vm06:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/crash to /archive/supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5353/remote/vm06/crash 2026-04-15T13:54:23.982 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/crash -- . 
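The crash-dump transfer just issued fails below with "Cannot open: No such file or directory": rm-cluster was run first (with --keep-logs, which preserves /var/log/ceph but still deletes the cluster's /var/lib/ceph/<fsid> tree), so the crash directory is already gone and the failure is harmless. A guarded sketch of the same step:

    # Archive the crash directory only if it survived teardown.
    fsid="75e42418-38cf-11f1-9300-4fe77ac4445b"
    crash="/var/lib/ceph/${fsid}/crash"
    if sudo test -d "$crash"; then
        sudo tar c -f - -C "$crash" -- . > "crash-$(hostname).tar"
    else
        echo "no crash directory at $crash; nothing to archive"
    fi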
2026-04-15T13:54:24.022 INFO:teuthology.orchestra.run.vm06.stderr:tar: /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/crash: Cannot open: No such file or directory 2026-04-15T13:54:24.023 INFO:teuthology.orchestra.run.vm06.stderr:tar: Error is not recoverable: exiting now 2026-04-15T13:54:24.023 DEBUG:teuthology.misc:Transferring archived files from vm09:/var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/crash to /archive/supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5353/remote/vm09/crash 2026-04-15T13:54:24.023 DEBUG:teuthology.orchestra.run.vm09:> sudo tar c -f - -C /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/crash -- . 2026-04-15T13:54:24.032 INFO:teuthology.orchestra.run.vm09.stderr:tar: /var/lib/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/crash: Cannot open: No such file or directory 2026-04-15T13:54:24.032 INFO:teuthology.orchestra.run.vm09.stderr:tar: Error is not recoverable: exiting now 2026-04-15T13:54:24.033 INFO:tasks.cephadm:Checking cluster log for badness... 2026-04-15T13:54:24.033 DEBUG:teuthology.orchestra.run.vm06:> sudo egrep '\[ERR\]|\[WRN\]|\[SEC\]' /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph.log | egrep CEPHADM_ | egrep -v '\(MDS_ALL_DOWN\)' | egrep -v '\(MDS_UP_LESS_THAN_MAX\)' | egrep -v CEPHADM_DAEMON_PLACE_FAIL | egrep -v CEPHADM_FAILED_DAEMON | head -n 1 2026-04-15T13:54:24.073 INFO:tasks.cephadm:Compressing logs... 2026-04-15T13:54:24.073 DEBUG:teuthology.orchestra.run.vm06:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-04-15T13:54:24.116 DEBUG:teuthology.orchestra.run.vm09:> time sudo find /var/log/ceph /var/log/rbd-target-api -name '*.log' -print0 | sudo xargs --max-args=1 --max-procs=0 --verbose -0 --no-run-if-empty -- gzip -5 --verbose -- 2026-04-15T13:54:24.124 INFO:teuthology.orchestra.run.vm06.stderr:find: ‘/var/log/rbd-target-api’: No such file or directory 2026-04-15T13:54:24.125 INFO:teuthology.orchestra.run.vm09.stderr:find: gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph.audit.log 2026-04-15T13:54:24.125 INFO:teuthology.orchestra.run.vm09.stderr:‘/var/log/rbd-target-api’: No such file or directory 2026-04-15T13:54:24.125 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-mgr.vm09.kpawde.log 2026-04-15T13:54:24.126 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph.audit.log: gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-mon.vm09.log 2026-04-15T13:54:24.126 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-mgr.vm09.kpawde.log: gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-client.rgw.foo.vm09.pxnsqu.log 2026-04-15T13:54:24.126 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph.log 2026-04-15T13:54:24.126 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-client.rgw.foo.vm06.liyzhd.log 2026-04-15T13:54:24.126 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph.audit.log 2026-04-15T13:54:24.126 
INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-client.rgw.foo.vm06.liyzhd.log: gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.7.log 2026-04-15T13:54:24.127 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ops-log-ceph-client.rgw.foo.vm06.landug.log 2026-04-15T13:54:24.127 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.7.log: /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph.audit.log: gzip/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ops-log-ceph-client.rgw.foo.vm06.landug.log: -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-mgr.vm06.qbbldl.log 2026-04-15T13:54:24.127 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-mon.vm09.log: /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-client.rgw.foo.vm09.pxnsqu.log: 91.5% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph.audit.log.gz 2026-04-15T13:54:24.128 INFO:teuthology.orchestra.run.vm09.stderr: 92.2% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-client.rgw.foo.vm09.pxnsqu.log.gz 2026-04-15T13:54:24.129 INFO:teuthology.orchestra.run.vm06.stderr: 92.1% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-client.rgw.foo.vm06.liyzhd.log.gz 2026-04-15T13:54:24.129 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.0.log 2026-04-15T13:54:24.129 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph.log: gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph.cephadm.log 2026-04-15T13:54:24.129 INFO:teuthology.orchestra.run.vm09.stderr: 92.7% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-mgr.vm09.kpawde.log.gz 2026-04-15T13:54:24.130 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-mgr.vm06.qbbldl.log: gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ops-log-ceph-client.rgw.foo.vm06.liyzhd.log 2026-04-15T13:54:24.130 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.0.log: 90.3% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph.log.gz 2026-04-15T13:54:24.130 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.4.log 2026-04-15T13:54:24.131 INFO:teuthology.orchestra.run.vm06.stderr: 93.5% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ops-log-ceph-client.rgw.foo.vm06.landug.log.gz 2026-04-15T13:54:24.131 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-client.ceph-exporter.vm06.log 2026-04-15T13:54:24.131 INFO:teuthology.orchestra.run.vm06.stderr: 91.3% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph.audit.log.gz 2026-04-15T13:54:24.131 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph.cephadm.log: 82.6% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph.cephadm.log.gz 2026-04-15T13:54:24.131 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- 
/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ops-log-ceph-client.rgw.foo.vm09.iwshxg.log 2026-04-15T13:54:24.131 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.4.log: gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-volume.log 2026-04-15T13:54:24.131 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.3.log 2026-04-15T13:54:24.131 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-client.ceph-exporter.vm06.log: gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph.log 2026-04-15T13:54:24.132 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.3.log: gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-mon.vm06.log 2026-04-15T13:54:24.132 INFO:teuthology.orchestra.run.vm06.stderr: 94.7% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-client.ceph-exporter.vm06.log.gz 2026-04-15T13:54:24.134 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ops-log-ceph-client.rgw.foo.vm09.iwshxg.log: 93.6% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ops-log-ceph-client.rgw.foo.vm09.iwshxg.log.gz 2026-04-15T13:54:24.136 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph.log: 90.1% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph.log.gz 2026-04-15T13:54:24.137 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ops-log-ceph-client.rgw.foo.vm06.liyzhd.log: gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph.cephadm.log 2026-04-15T13:54:24.139 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-mon.vm06.log: 93.4% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ops-log-ceph-client.rgw.foo.vm06.liyzhd.log.gz 2026-04-15T13:54:24.142 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-client.ceph-exporter.vm09.log 2026-04-15T13:54:24.154 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-client.rgw.foo.vm09.iwshxg.log 2026-04-15T13:54:24.155 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-client.ceph-exporter.vm09.log: 94.8% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-client.ceph-exporter.vm09.log.gz 2026-04-15T13:54:24.156 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-volume.log 2026-04-15T13:54:24.156 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph.cephadm.log: 83.5% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph.cephadm.log.gz 2026-04-15T13:54:24.161 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ops-log-ceph-client.rgw.foo.vm09.pxnsqu.log 2026-04-15T13:54:24.162 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-client.rgw.foo.vm09.iwshxg.log: 96.2% -- replaced with 
/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-volume.log.gz 2026-04-15T13:54:24.164 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.1.log 2026-04-15T13:54:24.164 INFO:teuthology.orchestra.run.vm09.stderr: 92.1% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-client.rgw.foo.vm09.iwshxg.log.gz 2026-04-15T13:54:24.170 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.2.log 2026-04-15T13:54:24.172 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-volume.log: gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-client.rgw.foo.vm06.landug.log 2026-04-15T13:54:24.172 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ops-log-ceph-client.rgw.foo.vm09.pxnsqu.log: 93.6% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ops-log-ceph-client.rgw.foo.vm09.pxnsqu.log.gz 2026-04-15T13:54:24.172 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.6.log 2026-04-15T13:54:24.180 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.1.log: gzip -5 --verbose -- /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.5.log 2026-04-15T13:54:24.186 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.2.log: gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-04-15T13:54:24.191 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-client.rgw.foo.vm06.landug.log: 92.4% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-client.rgw.foo.vm06.landug.log.gz 2026-04-15T13:54:24.194 INFO:teuthology.orchestra.run.vm06.stderr: 96.2% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-volume.log.gz 2026-04-15T13:54:24.196 INFO:teuthology.orchestra.run.vm09.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.6.log: /var/log/ceph/cephadm.log: 90.8% -- replaced with /var/log/ceph/cephadm.log.gz 2026-04-15T13:54:24.201 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /var/log/ceph/cephadm.log 2026-04-15T13:54:24.245 INFO:teuthology.orchestra.run.vm06.stderr:/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.5.log: /var/log/ceph/cephadm.log: 91.7% -- replaced with /var/log/ceph/cephadm.log.gz 2026-04-15T13:54:24.281 INFO:teuthology.orchestra.run.vm09.stderr: 92.8% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-mon.vm09.log.gz 2026-04-15T13:54:24.340 INFO:teuthology.orchestra.run.vm06.stderr: 89.9% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-mgr.vm06.qbbldl.log.gz 2026-04-15T13:54:24.687 INFO:teuthology.orchestra.run.vm09.stderr: 93.3% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.2.log.gz 2026-04-15T13:54:24.739 INFO:teuthology.orchestra.run.vm06.stderr: 90.8% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-mon.vm06.log.gz 2026-04-15T13:54:24.760 INFO:teuthology.orchestra.run.vm09.stderr: 93.3% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.0.log.gz 2026-04-15T13:54:24.841 INFO:teuthology.orchestra.run.vm09.stderr: 93.3% -- replaced with 
/var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.4.log.gz 2026-04-15T13:54:24.867 INFO:teuthology.orchestra.run.vm09.stderr: 93.2% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.6.log.gz 2026-04-15T13:54:24.867 INFO:teuthology.orchestra.run.vm06.stderr: 93.4% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.7.log.gz 2026-04-15T13:54:24.868 INFO:teuthology.orchestra.run.vm09.stderr: 2026-04-15T13:54:24.868 INFO:teuthology.orchestra.run.vm09.stderr:real 0m0.748s 2026-04-15T13:54:24.868 INFO:teuthology.orchestra.run.vm09.stderr:user 0m2.515s 2026-04-15T13:54:24.868 INFO:teuthology.orchestra.run.vm09.stderr:sys 0m0.131s 2026-04-15T13:54:24.957 INFO:teuthology.orchestra.run.vm06.stderr: 93.4% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.1.log.gz 2026-04-15T13:54:24.961 INFO:teuthology.orchestra.run.vm06.stderr: 93.3% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.5.log.gz 2026-04-15T13:54:24.983 INFO:teuthology.orchestra.run.vm06.stderr: 93.6% -- replaced with /var/log/ceph/75e42418-38cf-11f1-9300-4fe77ac4445b/ceph-osd.3.log.gz 2026-04-15T13:54:24.984 INFO:teuthology.orchestra.run.vm06.stderr: 2026-04-15T13:54:24.984 INFO:teuthology.orchestra.run.vm06.stderr:real 0m0.866s 2026-04-15T13:54:24.984 INFO:teuthology.orchestra.run.vm06.stderr:user 0m3.058s 2026-04-15T13:54:24.984 INFO:teuthology.orchestra.run.vm06.stderr:sys 0m0.209s 2026-04-15T13:54:24.984 INFO:tasks.cephadm:Archiving logs... 2026-04-15T13:54:24.984 DEBUG:teuthology.misc:Transferring archived files from vm06:/var/log/ceph to /archive/supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5353/remote/vm06/log 2026-04-15T13:54:24.985 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /var/log/ceph -- . 2026-04-15T13:54:25.171 DEBUG:teuthology.misc:Transferring archived files from vm09:/var/log/ceph to /archive/supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5353/remote/vm09/log 2026-04-15T13:54:25.171 DEBUG:teuthology.orchestra.run.vm09:> sudo tar c -f - -C /var/log/ceph -- . 2026-04-15T13:54:25.297 INFO:tasks.cephadm:Removing cluster... 2026-04-15T13:54:25.297 DEBUG:teuthology.orchestra.run.vm06:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b --force 2026-04-15T13:54:25.548 INFO:teuthology.orchestra.run.vm06.stdout:Deleting cluster with fsid: 75e42418-38cf-11f1-9300-4fe77ac4445b 2026-04-15T13:54:25.602 DEBUG:teuthology.orchestra.run.vm09:> sudo /home/ubuntu/cephtest/cephadm rm-cluster --fsid 75e42418-38cf-11f1-9300-4fe77ac4445b --force 2026-04-15T13:54:25.839 INFO:teuthology.orchestra.run.vm09.stdout:Deleting cluster with fsid: 75e42418-38cf-11f1-9300-4fe77ac4445b 2026-04-15T13:54:25.887 INFO:tasks.cephadm:Removing cephadm ... 2026-04-15T13:54:25.887 DEBUG:teuthology.orchestra.run.vm06:> rm -rf /home/ubuntu/cephtest/cephadm 2026-04-15T13:54:25.890 DEBUG:teuthology.orchestra.run.vm09:> rm -rf /home/ubuntu/cephtest/cephadm 2026-04-15T13:54:25.893 INFO:tasks.cephadm:Teardown complete 2026-04-15T13:54:25.893 DEBUG:teuthology.run_tasks:Unwinding manager nvme_loop 2026-04-15T13:54:25.896 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm06:/dev/vg_nvme/lv_1... 
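The scrambled-looking gzip messages above, with filenames and "-- replaced with" percentages spliced into one another, are not log corruption: the compression step runs xargs with --max-procs=0, i.e. one gzip per log file in parallel, and the children's --verbose output interleaves on stderr. A serialized equivalent that keeps each message on its own line, at some cost in wall-clock time:

    # Same compression step, but --max-procs=1 runs the gzip children
    # one at a time so their --verbose messages do not interleave.
    sudo find /var/log/ceph -name '*.log' -print0 \
      | sudo xargs --max-args=1 --max-procs=1 --verbose -0 --no-run-if-empty \
          -- gzip -5 --verbose --

The nvme_loop unwind that begins above then detaches each NVMe-oF loop target by its NQN (lv_1 through lv_4 on each host), as the disconnect commands below show.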
2026-04-15T13:54:25.896 DEBUG:teuthology.orchestra.run.vm06:> sudo nvme disconnect -n lv_1 2026-04-15T13:54:26.112 INFO:teuthology.orchestra.run.vm06.stdout:NQN:lv_1 disconnected 1 controller(s) 2026-04-15T13:54:26.113 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm06:/dev/vg_nvme/lv_2... 2026-04-15T13:54:26.113 DEBUG:teuthology.orchestra.run.vm06:> sudo nvme disconnect -n lv_2 2026-04-15T13:54:26.248 INFO:teuthology.orchestra.run.vm06.stdout:NQN:lv_2 disconnected 1 controller(s) 2026-04-15T13:54:26.248 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm06:/dev/vg_nvme/lv_3... 2026-04-15T13:54:26.248 DEBUG:teuthology.orchestra.run.vm06:> sudo nvme disconnect -n lv_3 2026-04-15T13:54:26.400 INFO:teuthology.orchestra.run.vm06.stdout:NQN:lv_3 disconnected 1 controller(s) 2026-04-15T13:54:26.401 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm06:/dev/vg_nvme/lv_4... 2026-04-15T13:54:26.401 DEBUG:teuthology.orchestra.run.vm06:> sudo nvme disconnect -n lv_4 2026-04-15T13:54:26.552 INFO:teuthology.orchestra.run.vm06.stdout:NQN:lv_4 disconnected 1 controller(s) 2026-04-15T13:54:26.553 DEBUG:teuthology.orchestra.run.vm06:> set -ex 2026-04-15T13:54:26.553 DEBUG:teuthology.orchestra.run.vm06:> sudo dd of=/scratch_devs 2026-04-15T13:54:26.561 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm09:/dev/vg_nvme/lv_1... 2026-04-15T13:54:26.561 DEBUG:teuthology.orchestra.run.vm09:> sudo nvme disconnect -n lv_1 2026-04-15T13:54:26.730 INFO:teuthology.orchestra.run.vm09.stdout:NQN:lv_1 disconnected 1 controller(s) 2026-04-15T13:54:26.731 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm09:/dev/vg_nvme/lv_2... 2026-04-15T13:54:26.731 DEBUG:teuthology.orchestra.run.vm09:> sudo nvme disconnect -n lv_2 2026-04-15T13:54:26.890 INFO:teuthology.orchestra.run.vm09.stdout:NQN:lv_2 disconnected 1 controller(s) 2026-04-15T13:54:26.891 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm09:/dev/vg_nvme/lv_3... 2026-04-15T13:54:26.891 DEBUG:teuthology.orchestra.run.vm09:> sudo nvme disconnect -n lv_3 2026-04-15T13:54:27.046 INFO:teuthology.orchestra.run.vm09.stdout:NQN:lv_3 disconnected 1 controller(s) 2026-04-15T13:54:27.046 INFO:tasks.nvme_loop:Disconnecting nvme_loop vm09:/dev/vg_nvme/lv_4... 2026-04-15T13:54:27.047 DEBUG:teuthology.orchestra.run.vm09:> sudo nvme disconnect -n lv_4 2026-04-15T13:54:27.210 INFO:teuthology.orchestra.run.vm09.stdout:NQN:lv_4 disconnected 1 controller(s) 2026-04-15T13:54:27.211 DEBUG:teuthology.orchestra.run.vm09:> set -ex 2026-04-15T13:54:27.211 DEBUG:teuthology.orchestra.run.vm09:> sudo dd of=/scratch_devs 2026-04-15T13:54:27.219 DEBUG:teuthology.run_tasks:Unwinding manager clock 2026-04-15T13:54:27.221 INFO:teuthology.task.clock:Checking final clock skew... 2026-04-15T13:54:27.221 DEBUG:teuthology.orchestra.run.vm06:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-04-15T13:54:27.222 DEBUG:teuthology.orchestra.run.vm09:> PATH=/usr/bin:/usr/sbin ntpq -p || PATH=/usr/bin:/usr/sbin chronyc sources || true 2026-04-15T13:54:27.456 INFO:teuthology.orchestra.run.vm09.stdout: remote refid st t when poll reach delay offset jitter 2026-04-15T13:54:27.456 INFO:teuthology.orchestra.run.vm09.stdout:============================================================================== 2026-04-15T13:54:27.456 INFO:teuthology.orchestra.run.vm09.stdout: 0.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000 2026-04-15T13:54:27.456 INFO:teuthology.orchestra.run.vm09.stdout: 1.ubuntu.pool.n .POOL. 
16 p - 64 0 0.000 +0.000 0.000 2026-04-15T13:54:27.456 INFO:teuthology.orchestra.run.vm09.stdout: 2.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000 2026-04-15T13:54:27.456 INFO:teuthology.orchestra.run.vm09.stdout: 3.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000 2026-04-15T13:54:27.456 INFO:teuthology.orchestra.run.vm09.stdout: ntp.ubuntu.com .POOL. 16 p - 64 0 0.000 +0.000 0.000 2026-04-15T13:54:27.456 INFO:teuthology.orchestra.run.vm09.stdout:*178.215.228.24 189.97.54.122 2 u 43 64 377 21.966 -0.224 0.778 2026-04-15T13:54:27.456 INFO:teuthology.orchestra.run.vm09.stdout:-hydra.mybb.de 189.97.54.122 2 u 50 128 377 28.291 -2.458 0.274 2026-04-15T13:54:27.456 INFO:teuthology.orchestra.run.vm09.stdout:-static.179.181. 213.239.239.166 3 u 114 128 377 23.523 +0.499 0.240 2026-04-15T13:54:27.456 INFO:teuthology.orchestra.run.vm09.stdout:-mailcow.f5s.de 131.188.3.223 2 u 118 128 377 24.999 +0.713 0.239 2026-04-15T13:54:27.456 INFO:teuthology.orchestra.run.vm09.stdout:-ntp01.srv-pro.d 91.38.219.194 2 u 52 128 377 24.226 -1.769 0.123 2026-04-15T13:54:27.456 INFO:teuthology.orchestra.run.vm09.stdout:+130.162.222.153 251.6.180.128 3 u 42 128 377 27.769 -0.748 1.627 2026-04-15T13:54:27.456 INFO:teuthology.orchestra.run.vm09.stdout:-static.106.198. 181.22.208.20 2 u 52 64 377 23.693 -0.837 0.245 2026-04-15T13:54:27.456 INFO:teuthology.orchestra.run.vm09.stdout:-ntp02.plutex.de .PPS. 1 u 126 128 377 31.358 +3.691 0.254 2026-04-15T13:54:27.456 INFO:teuthology.orchestra.run.vm09.stdout:-time.cloudflare 10.164.8.4 3 u 49 64 377 20.450 +1.702 0.127 2026-04-15T13:54:27.456 INFO:teuthology.orchestra.run.vm09.stdout:+185.125.190.58 99.220.8.133 2 u 26 128 377 35.884 -0.211 0.300 2026-04-15T13:54:27.527 INFO:teuthology.orchestra.run.vm06.stdout: remote refid st t when poll reach delay offset jitter 2026-04-15T13:54:27.527 INFO:teuthology.orchestra.run.vm06.stdout:============================================================================== 2026-04-15T13:54:27.527 INFO:teuthology.orchestra.run.vm06.stdout: 0.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000 2026-04-15T13:54:27.527 INFO:teuthology.orchestra.run.vm06.stdout: 1.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000 2026-04-15T13:54:27.527 INFO:teuthology.orchestra.run.vm06.stdout: 2.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000 2026-04-15T13:54:27.527 INFO:teuthology.orchestra.run.vm06.stdout: 3.ubuntu.pool.n .POOL. 16 p - 64 0 0.000 +0.000 0.000 2026-04-15T13:54:27.527 INFO:teuthology.orchestra.run.vm06.stdout: ntp.ubuntu.com .POOL. 16 p - 64 0 0.000 +0.000 0.000 2026-04-15T13:54:27.527 INFO:teuthology.orchestra.run.vm06.stdout:+178.215.228.24 189.97.54.122 2 u 38 64 377 21.909 +3.204 0.441 2026-04-15T13:54:27.527 INFO:teuthology.orchestra.run.vm06.stdout:#p3.dianacht.de 240.67.35.84 3 u 51 64 377 32.680 +4.154 0.485 2026-04-15T13:54:27.527 INFO:teuthology.orchestra.run.vm06.stdout:*nbg01.muxx.net 189.97.54.122 2 u 40 64 377 23.562 +2.770 0.314 2026-04-15T13:54:27.527 INFO:teuthology.orchestra.run.vm06.stdout:#mail.sassmann.n 192.53.103.103 2 u 42 64 377 23.606 +4.594 0.325 2026-04-15T13:54:27.527 INFO:teuthology.orchestra.run.vm06.stdout:+static.106.198. 181.22.208.20 2 u 52 64 377 23.675 +2.524 1.475 2026-04-15T13:54:27.527 INFO:teuthology.orchestra.run.vm06.stdout:-time.cloudflare 10.163.8.4 3 u 45 64 377 20.460 +5.007 0.482 2026-04-15T13:54:27.527 INFO:teuthology.orchestra.run.vm06.stdout:-static.179.181. 
213.239.239.166 3 u 42 64 377 23.542 +3.908 0.594 2026-04-15T13:54:27.527 INFO:teuthology.orchestra.run.vm06.stdout:#hydra.mybb.de 189.97.54.122 2 u 42 64 377 28.289 +0.554 0.298 2026-04-15T13:54:27.527 INFO:teuthology.orchestra.run.vm06.stdout:-ntp01.srv-pro.d 91.38.219.194 2 u 49 64 377 23.949 +1.465 0.378 2026-04-15T13:54:27.527 INFO:teuthology.orchestra.run.vm06.stdout:-mailcow.f5s.de 131.188.3.223 2 u 39 128 377 25.037 +3.440 0.403 2026-04-15T13:54:27.527 INFO:teuthology.orchestra.run.vm06.stdout:-coloring.acul.m 237.17.204.95 2 u 44 128 377 25.090 +4.303 0.637 2026-04-15T13:54:27.527 INFO:teuthology.orchestra.run.vm06.stdout:-130.162.222.153 251.6.180.128 3 u 44 64 377 27.801 +2.160 1.996 2026-04-15T13:54:27.527 INFO:teuthology.orchestra.run.vm06.stdout:-ntp02.plutex.de .PPS. 1 u 48 128 377 31.317 +7.063 0.315 2026-04-15T13:54:27.528 DEBUG:teuthology.run_tasks:Unwinding manager ansible.cephlab 2026-04-15T13:54:27.530 INFO:teuthology.task.ansible:Skipping ansible cleanup... 2026-04-15T13:54:27.530 DEBUG:teuthology.run_tasks:Unwinding manager selinux 2026-04-15T13:54:27.532 DEBUG:teuthology.run_tasks:Unwinding manager pcp 2026-04-15T13:54:27.535 DEBUG:teuthology.run_tasks:Unwinding manager internal.timer 2026-04-15T13:54:27.537 INFO:teuthology.task.internal:Duration was 1564.599553 seconds 2026-04-15T13:54:27.537 DEBUG:teuthology.run_tasks:Unwinding manager internal.syslog 2026-04-15T13:54:27.539 INFO:teuthology.task.internal.syslog:Shutting down syslog monitoring... 2026-04-15T13:54:27.539 DEBUG:teuthology.orchestra.run.vm06:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart 2026-04-15T13:54:27.540 DEBUG:teuthology.orchestra.run.vm09:> sudo rm -f -- /etc/rsyslog.d/80-cephtest.conf && sudo service rsyslog restart 2026-04-15T13:54:27.566 INFO:teuthology.task.internal.syslog:Checking logs for errors... 
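Both hosts pass the final skew check: every reachable peer in the ntpq output above is within a few milliseconds of offset. A small awk helper, an addition rather than part of teuthology, to pull the worst peer offset out of ntpq -p:

    # Print the largest absolute peer offset (column 9, in ms),
    # skipping the two header lines and the unresolved .POOL. stubs.
    PATH=/usr/bin:/usr/sbin ntpq -p | awk 'NR > 2 && $2 != ".POOL." {
        o = $9 + 0; if (o < 0) o = -o
        if (o > max) max = o
    } END { printf "max |offset| = %.3f ms\n", max }'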
2026-04-15T13:54:27.566 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm06.local 2026-04-15T13:54:27.566 DEBUG:teuthology.orchestra.run.vm06:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1 2026-04-15T13:54:27.616 DEBUG:teuthology.task.internal.syslog:Checking ubuntu@vm09.local 2026-04-15T13:54:27.616 DEBUG:teuthology.orchestra.run.vm09:> grep -E --binary-files=text '\bBUG\b|\bINFO\b|\bDEADLOCK\b' /home/ubuntu/cephtest/archive/syslog/kern.log | grep -v 'task .* blocked for more than .* seconds' | grep -v 'lockdep is turned off' | grep -v 'trying to register non-static key' | grep -v 'DEBUG: fsize' | grep -v CRON | grep -v 'BUG: bad unlock balance detected' | grep -v 'inconsistent lock state' | grep -v '*** DEADLOCK ***' | grep -v 'INFO: possible irq lock inversion dependency detected' | grep -v 'INFO: NMI handler (perf_event_nmi_handler) took too long to run' | grep -v 'INFO: recovery required on readonly' | grep -v 'ceph-create-keys: INFO' | grep -v INFO:ceph-create-keys | grep -v 'Loaded datasource DataSourceOpenStack' | grep -v 'container-storage-setup: INFO: Volume group backing root filesystem could not be determined' | grep -E -v '\bsalt-master\b|\bsalt-minion\b|\bsalt-api\b' | grep -v ceph-crash | grep -E -v '\btcmu-runner\b.*\bINFO\b' | head -n 1 2026-04-15T13:54:27.624 INFO:teuthology.task.internal.syslog:Gathering journactl... 2026-04-15T13:54:27.624 DEBUG:teuthology.orchestra.run.vm06:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-04-15T13:54:27.660 DEBUG:teuthology.orchestra.run.vm09:> sudo journalctl > /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-04-15T13:54:27.738 INFO:teuthology.task.internal.syslog:Compressing syslogs... 
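The kern.log scan above has a simple shape: one inclusion pattern (BUG, INFO, or DEADLOCK as whole words) followed by a long chain of grep -v whitelists for known-benign lines, ending in head -n 1, so a single surviving line is enough to flag the job. A compact restatement of that shape (the whitelist here is abbreviated, not the full list used in the run):

    # Build the grep -v whitelist chain programmatically; any line
    # that survives every filter is a potential kernel problem.
    log=/home/ubuntu/cephtest/archive/syslog/kern.log
    excludes=('lockdep is turned off' 'DEBUG: fsize' 'CRON')  # abbreviated
    cmd="grep -E --binary-files=text '\\bBUG\\b|\\bINFO\\b|\\bDEADLOCK\\b' $log"
    for e in "${excludes[@]}"; do cmd+=" | grep -v '$e'"; done
    eval "$cmd" | head -n 1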
2026-04-15T13:54:27.738 DEBUG:teuthology.orchestra.run.vm06:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose -- 2026-04-15T13:54:27.739 DEBUG:teuthology.orchestra.run.vm09:> find /home/ubuntu/cephtest/archive/syslog -name '*.log' -print0 | sudo xargs -0 --max-args=1 --max-procs=0 --verbose --no-run-if-empty -- gzip -5 --verbose -- 2026-04-15T13:54:27.746 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log 2026-04-15T13:54:27.746 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/kern.log 2026-04-15T13:54:27.746 INFO:teuthology.orchestra.run.vm09.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log 2026-04-15T13:54:27.747 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-04-15T13:54:27.747 INFO:teuthology.orchestra.run.vm09.stderr: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz 2026-04-15T13:54:27.747 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz 2026-04-15T13:54:27.747 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/misc.log 2026-04-15T13:54:27.747 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/kern.log: 0.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/kern.log.gz 2026-04-15T13:54:27.747 INFO:teuthology.orchestra.run.vm06.stderr:gzip -5 --verbose -- /home/ubuntu/cephtest/archive/syslog/journalctl.log 2026-04-15T13:54:27.747 INFO:teuthology.orchestra.run.vm06.stderr:/home/ubuntu/cephtest/archive/syslog/misc.log: 0.0%/home/ubuntu/cephtest/archive/syslog/journalctl.log: -- replaced with /home/ubuntu/cephtest/archive/syslog/misc.log.gz 2026-04-15T13:54:27.760 INFO:teuthology.orchestra.run.vm09.stderr:/home/ubuntu/cephtest/archive/syslog/journalctl.log: 92.0% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz 2026-04-15T13:54:27.765 INFO:teuthology.orchestra.run.vm06.stderr: 90.9% -- replaced with /home/ubuntu/cephtest/archive/syslog/journalctl.log.gz 2026-04-15T13:54:27.766 DEBUG:teuthology.run_tasks:Unwinding manager internal.sudo 2026-04-15T13:54:27.770 INFO:teuthology.task.internal:Restoring /etc/sudoers... 
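The sudoers restore below is the second half of a backup/restore pair: setup saved the original file to /etc/sudoers.orig.teuthology before modifying it, and teardown moves it back with mv -f. The setup half shown here is an assumed form, not taken from this log:

    # Setup (assumed): keep a pristine copy before editing sudoers.
    sudo cp /etc/sudoers /etc/sudoers.orig.teuthology
    # Teardown (as run below on both hosts): restore the original.
    sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers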
2026-04-15T13:54:27.770 DEBUG:teuthology.orchestra.run.vm06:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-04-15T13:54:27.815 DEBUG:teuthology.orchestra.run.vm09:> sudo mv -f /etc/sudoers.orig.teuthology /etc/sudoers
2026-04-15T13:54:27.824 DEBUG:teuthology.run_tasks:Unwinding manager internal.coredump
2026-04-15T13:54:27.827 DEBUG:teuthology.orchestra.run.vm06:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-04-15T13:54:27.860 DEBUG:teuthology.orchestra.run.vm09:> sudo sysctl -w kernel.core_pattern=core && sudo bash -c 'for f in `find /home/ubuntu/cephtest/archive/coredump -type f`; do file $f | grep -q systemd-sysusers && rm $f || true ; done' && rmdir --ignore-fail-on-non-empty -- /home/ubuntu/cephtest/archive/coredump
2026-04-15T13:54:27.867 INFO:teuthology.orchestra.run.vm06.stdout:kernel.core_pattern = core
2026-04-15T13:54:27.872 INFO:teuthology.orchestra.run.vm09.stdout:kernel.core_pattern = core
2026-04-15T13:54:27.880 DEBUG:teuthology.orchestra.run.vm06:> test -e /home/ubuntu/cephtest/archive/coredump
2026-04-15T13:54:27.922 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-15T13:54:27.922 DEBUG:teuthology.orchestra.run.vm09:> test -e /home/ubuntu/cephtest/archive/coredump
2026-04-15T13:54:27.925 DEBUG:teuthology.orchestra.run:got remote process result: 1
2026-04-15T13:54:27.925 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive
2026-04-15T13:54:27.928 INFO:teuthology.task.internal:Transferring archived files...
2026-04-15T13:54:27.928 DEBUG:teuthology.misc:Transferring archived files from vm06:/home/ubuntu/cephtest/archive to /archive/supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5353/remote/vm06
2026-04-15T13:54:27.928 DEBUG:teuthology.orchestra.run.vm06:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-04-15T13:54:27.971 DEBUG:teuthology.misc:Transferring archived files from vm09:/home/ubuntu/cephtest/archive to /archive/supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5353/remote/vm09
2026-04-15T13:54:27.972 DEBUG:teuthology.orchestra.run.vm09:> sudo tar c -f - -C /home/ubuntu/cephtest/archive -- .
2026-04-15T13:54:27.980 INFO:teuthology.task.internal:Removing archive directory...
2026-04-15T13:54:27.980 DEBUG:teuthology.orchestra.run.vm06:> rm -rf -- /home/ubuntu/cephtest/archive
2026-04-15T13:54:28.016 DEBUG:teuthology.orchestra.run.vm09:> rm -rf -- /home/ubuntu/cephtest/archive
2026-04-15T13:54:28.024 DEBUG:teuthology.run_tasks:Unwinding manager internal.archive_upload
2026-04-15T13:54:28.027 INFO:teuthology.task.internal:Not uploading archives.
2026-04-15T13:54:28.027 DEBUG:teuthology.run_tasks:Unwinding manager internal.base
2026-04-15T13:54:28.030 INFO:teuthology.task.internal:Tidying up after the test...
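The archive transfer above works by running tar on the remote host with output to stdout and unpacking the stream on the teuthology side; the exit status 1 from `test -e .../coredump` just confirms the (empty) coredump directory was already removed. A sketch of that streaming pattern, assuming plain ssh transport; pull_archive is a stand-in name, and teuthology actually drives the connection through its orchestra layer rather than this exact code.

    import subprocess
    import tarfile

    def pull_archive(host, remote_dir, local_dir):
        """Unpack `ssh host sudo tar c -f - -C remote_dir -- .` into local_dir."""
        cmd = ['ssh', host, 'sudo', 'tar', 'c', '-f', '-', '-C', remote_dir, '--', '.']
        with subprocess.Popen(cmd, stdout=subprocess.PIPE) as proc:
            # mode='r|' reads the tar as a forward-only stream; no seeking needed
            with tarfile.open(fileobj=proc.stdout, mode='r|') as tar:
                tar.extractall(local_dir)
        if proc.returncode:
            raise RuntimeError(f'tar stream from {host} exited {proc.returncode}')

    pull_archive('ubuntu@vm06.local', '/home/ubuntu/cephtest/archive',
                 '/archive/supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-wip-sse-s3-on-v20.2.0-none-default-vps/5353/remote/vm06')

Streaming avoids staging a tarball on the remote disk; once both nodes are drained, the remote archive directories are deleted outright.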
2026-04-15T13:54:28.030 DEBUG:teuthology.orchestra.run.vm06:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-04-15T13:54:28.060 DEBUG:teuthology.orchestra.run.vm09:> find /home/ubuntu/cephtest -ls ; rmdir -- /home/ubuntu/cephtest
2026-04-15T13:54:28.062 INFO:teuthology.orchestra.run.vm06.stdout:   258069      4 drwxr-xr-x   2 ubuntu   ubuntu       4096 Apr 15 13:54 /home/ubuntu/cephtest
2026-04-15T13:54:28.069 INFO:teuthology.orchestra.run.vm09.stdout:   258068      4 drwxr-xr-x   2 ubuntu   ubuntu       4096 Apr 15 13:54 /home/ubuntu/cephtest
2026-04-15T13:54:28.069 DEBUG:teuthology.run_tasks:Unwinding manager console_log
2026-04-15T13:54:28.075 INFO:teuthology.run:Summary data:
description: orch:cephadm:smoke-roleless/{0-distro/ubuntu_22.04 0-nvme-loop 1-start 2-services/rgw-ingress 3-final}
duration: 1564.5995526313782
owner: supriti
success: true

2026-04-15T13:54:28.075 DEBUG:teuthology.report:Pushing job info to http://localhost:8080
2026-04-15T13:54:28.092 INFO:teuthology.run:pass
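The run ends by emitting the summary (description, duration, owner, success) and pushing the job info to the local reporting endpoint; teuthology also records the same data as summary.yaml in the job's archive directory. A simplified sketch of that final write, using PyYAML (a teuthology dependency); this is a stand-in under those assumptions, not the runner's own code.

    import yaml  # PyYAML

    # The summary fields printed in the log above.
    summary = {
        'description': ('orch:cephadm:smoke-roleless/{0-distro/ubuntu_22.04 '
                        '0-nvme-loop 1-start 2-services/rgw-ingress 3-final}'),
        'duration': 1564.5995526313782,   # seconds, ~26 minutes end to end
        'owner': 'supriti',
        'success': True,
    }

    archive_dir = ('/archive/supriti-2026-04-15_10:39:10-orch:cephadm:smoke-roleless-'
                   'wip-sse-s3-on-v20.2.0-none-default-vps/5353')
    with open(f'{archive_dir}/summary.yaml', 'w') as f:
        yaml.safe_dump(summary, f, default_flow_style=False)
    print('pass' if summary['success'] else 'FAIL')  # matches the final log line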